No vector stores found.
;
}
- return (
-
-
-
-
- ID
- Name
- Created
- Completed
- Cancelled
- Failed
- In Progress
- Total
- Usage Bytes
- Provider ID
- Provider Vector DB ID
-
-
-
- {stores.map(store => {
- const fileCounts = store.file_counts;
- const metadata = store.metadata || {};
- const providerId = metadata.provider_id ?? "";
- const providerDbId = metadata.provider_vector_db_id ?? "";
+ // Filter stores based on search term
+ const filteredStores = stores.filter(store => {
+ if (!searchTerm) return true;
- return (
- router.push(`/logs/vector-stores/${store.id}`)}
- className="cursor-pointer hover:bg-muted/50"
- >
-
-
-
- {store.name}
-
- {new Date(store.created_at * 1000).toLocaleString()}
-
- {fileCounts.completed}
- {fileCounts.cancelled}
- {fileCounts.failed}
- {fileCounts.in_progress}
- {fileCounts.total}
- {store.usage_bytes}
- {providerId}
- {providerDbId}
-
- );
- })}
-
-
+ const searchLower = searchTerm.toLowerCase();
+ return (
+ store.id.toLowerCase().includes(searchLower) ||
+ (store.name && store.name.toLowerCase().includes(searchLower)) ||
+ (store.metadata?.provider_id &&
+ String(store.metadata.provider_id)
+ .toLowerCase()
+ .includes(searchLower)) ||
+ (store.metadata?.provider_vector_db_id &&
+ String(store.metadata.provider_vector_db_id)
+ .toLowerCase()
+ .includes(searchLower))
+ );
+ });
+
+ return (
+
+ {/* Search Bar */}
+
+
+ setSearchTerm(e.target.value)}
+ className="pl-10"
+ />
+
+
+
+
+
+
+ ID
+ Name
+ Created
+ Completed
+ Cancelled
+ Failed
+ In Progress
+ Total
+ Usage Bytes
+ Provider ID
+ Provider Vector DB ID
+ Actions
+
+
+
+ {filteredStores.map(store => {
+ const fileCounts = store.file_counts;
+ const metadata = store.metadata || {};
+ const providerId = metadata.provider_id ?? "";
+ const providerDbId = metadata.provider_vector_db_id ?? "";
+
+ return (
+
+ router.push(`/logs/vector-stores/${store.id}`)
+ }
+ className="cursor-pointer hover:bg-muted/50"
+ >
+
+
+
+ {store.name}
+
+ {new Date(store.created_at * 1000).toLocaleString()}
+
+ {fileCounts.completed}
+ {fileCounts.cancelled}
+ {fileCounts.failed}
+ {fileCounts.in_progress}
+ {fileCounts.total}
+ {store.usage_bytes}
+ {providerId}
+ {providerDbId}
+
+
+
+
+
+
+
+ );
+ })}
+
+
+
);
};
return (
-
Vector Stores
+
+
Vector Stores
+
+
{renderContent()}
+
+ {/* Create Vector Store Modal */}
+ {showVectorStoreModal && (
+
+
+
+
+ {editingStore ? "Edit Vector Store" : "Create New Vector Store"}
+
+
+
+
+
+
+
+
+ )}
);
}
diff --git a/src/llama_stack_ui/bin/cli.js b/src/llama_stack_ui/bin/cli.js
new file mode 100755
index 000000000..6069d2f22
--- /dev/null
+++ b/src/llama_stack_ui/bin/cli.js
@@ -0,0 +1,34 @@
#!/usr/bin/env node

// Launches the pre-built Next.js standalone server for the Llama Stack UI.
// The standalone bundle is produced by `next build` with `output: "standalone"`
// (see next.config.ts) plus scripts/postbuild.js, which copies static assets in.

const { spawn } = require('child_process');
const fs = require('fs');
const path = require('path');

// Port is configurable via LLAMA_STACK_UI_PORT; defaults to 8322.
const port = process.env.LLAMA_STACK_UI_PORT || 8322;
const uiDir = path.resolve(__dirname, '..');
// This path mirrors the directory layout Next.js bakes into the standalone
// output for this package (ui/src/llama_stack_ui/server.js).
const serverPath = path.join(uiDir, '.next', 'standalone', 'ui', 'src', 'llama_stack_ui', 'server.js');
const serverDir = path.dirname(serverPath);

// Fail fast with an actionable message if the package was not built.
if (!fs.existsSync(serverPath)) {
  console.error(`Llama Stack UI server not found at ${serverPath}. Run "npm run build" first.`);
  process.exit(1);
}

console.log(`Starting Llama Stack UI on http://localhost:${port}`);

const child = spawn(process.execPath, [serverPath], {
  cwd: serverDir,
  stdio: 'inherit',
  env: {
    ...process.env,
    // env values must be strings; port may be a number (the 8322 default).
    PORT: String(port),
  },
});

// Forward termination signals to the child and let the child's own exit drive
// ours, instead of calling process.exit(0) immediately — that raced the
// child's shutdown and always reported success regardless of how it died.
for (const signal of ['SIGINT', 'SIGTERM']) {
  process.on(signal, () => {
    child.kill(signal);
  });
}

child.on('exit', (code, signal) => {
  if (signal) {
    // Child was killed by a signal: `code` is null here, so re-raise the same
    // signal on ourselves (after dropping our handler) to propagate the status.
    process.removeAllListeners(signal);
    process.kill(process.pid, signal);
  } else {
    process.exit(code ?? 0);
  }
});
diff --git a/src/llama_stack_ui/components/prompts/prompt-editor.test.tsx b/src/llama_stack_ui/components/prompts/prompt-editor.test.tsx
index 458a5f942..70e0e4e66 100644
--- a/src/llama_stack_ui/components/prompts/prompt-editor.test.tsx
+++ b/src/llama_stack_ui/components/prompts/prompt-editor.test.tsx
@@ -2,7 +2,7 @@ import React from "react";
import { render, screen, fireEvent } from "@testing-library/react";
import "@testing-library/jest-dom";
import { PromptEditor } from "./prompt-editor";
-import type { Prompt, PromptFormData } from "./types";
+import type { Prompt } from "./types";
describe("PromptEditor", () => {
const mockOnSave = jest.fn();
diff --git a/src/llama_stack_ui/components/vector-stores/vector-store-detail.test.tsx b/src/llama_stack_ui/components/vector-stores/vector-store-detail.test.tsx
index 08f90ac0d..78bec8147 100644
--- a/src/llama_stack_ui/components/vector-stores/vector-store-detail.test.tsx
+++ b/src/llama_stack_ui/components/vector-stores/vector-store-detail.test.tsx
@@ -12,6 +12,20 @@ jest.mock("next/navigation", () => ({
}),
}));
+// Mock NextAuth
+jest.mock("next-auth/react", () => ({
+ useSession: () => ({
+ data: {
+ accessToken: "mock-access-token",
+ user: {
+ id: "mock-user-id",
+ email: "test@example.com",
+ },
+ },
+ status: "authenticated",
+ }),
+}));
+
describe("VectorStoreDetailView", () => {
const defaultProps = {
store: null,
diff --git a/src/llama_stack_ui/components/vector-stores/vector-store-detail.tsx b/src/llama_stack_ui/components/vector-stores/vector-store-detail.tsx
index d3d0fa249..f5b6281e7 100644
--- a/src/llama_stack_ui/components/vector-stores/vector-store-detail.tsx
+++ b/src/llama_stack_ui/components/vector-stores/vector-store-detail.tsx
@@ -1,16 +1,18 @@
"use client";
import { useRouter } from "next/navigation";
+import { useState, useEffect } from "react";
import type { VectorStore } from "llama-stack-client/resources/vector-stores/vector-stores";
import type { VectorStoreFile } from "llama-stack-client/resources/vector-stores/files";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Skeleton } from "@/components/ui/skeleton";
import { Button } from "@/components/ui/button";
+import { useAuthClient } from "@/hooks/use-auth-client";
+import { Edit2, Trash2, X } from "lucide-react";
import {
DetailLoadingView,
DetailErrorView,
DetailNotFoundView,
- DetailLayout,
PropertiesCard,
PropertyItem,
} from "@/components/layout/detail-layout";
@@ -23,6 +25,7 @@ import {
TableHeader,
TableRow,
} from "@/components/ui/table";
+import { VectorStoreEditor, VectorStoreFormData } from "./vector-store-editor";
interface VectorStoreDetailViewProps {
store: VectorStore | null;
@@ -43,21 +46,122 @@ export function VectorStoreDetailView({
errorFiles,
id,
}: VectorStoreDetailViewProps) {
- const title = "Vector Store Details";
const router = useRouter();
+ const client = useAuthClient();
+ const [isDeleting, setIsDeleting] = useState(false);
+ const [showEditModal, setShowEditModal] = useState(false);
+ const [modalError, setModalError] = useState
(null);
+ const [showSuccessState, setShowSuccessState] = useState(false);
+
+ // Handle ESC key to close modal
+ useEffect(() => {
+ const handleEscape = (event: KeyboardEvent) => {
+ if (event.key === "Escape" && showEditModal) {
+ handleCancel();
+ }
+ };
+
+ document.addEventListener("keydown", handleEscape);
+ return () => document.removeEventListener("keydown", handleEscape);
+ }, [showEditModal]);
const handleFileClick = (fileId: string) => {
router.push(`/logs/vector-stores/${id}/files/${fileId}`);
};
+ const handleEditVectorStore = () => {
+ setShowEditModal(true);
+ setModalError(null);
+ setShowSuccessState(false);
+ };
+
+ const handleCancel = () => {
+ setShowEditModal(false);
+ setModalError(null);
+ setShowSuccessState(false);
+ };
+
+ const handleSaveVectorStore = async (formData: VectorStoreFormData) => {
+ try {
+ setModalError(null);
+
+ // Update existing vector store (same logic as list page)
+ const updateParams: {
+ name?: string;
+ extra_body?: Record;
+ } = {};
+
+ // Only include fields that have changed or are provided
+ if (formData.name && formData.name !== store?.name) {
+ updateParams.name = formData.name;
+ }
+
+ // Add all parameters to extra_body (except provider_id which can't be changed)
+ const extraBody: Record = {};
+ if (formData.embedding_model) {
+ extraBody.embedding_model = formData.embedding_model;
+ }
+ if (formData.embedding_dimension) {
+ extraBody.embedding_dimension = formData.embedding_dimension;
+ }
+
+ if (Object.keys(extraBody).length > 0) {
+ updateParams.extra_body = extraBody;
+ }
+
+ await client.vectorStores.update(id, updateParams);
+
+ // Show success state
+ setShowSuccessState(true);
+ setModalError(
+ "ā
Vector store updated successfully! You can close this modal and refresh the page to see changes."
+ );
+ } catch (err: unknown) {
+ console.error("Failed to update vector store:", err);
+ const errorMessage =
+ err instanceof Error ? err.message : "Failed to update vector store";
+ setModalError(errorMessage);
+ }
+ };
+
+ const handleDeleteVectorStore = async () => {
+ if (
+ !confirm(
+ "Are you sure you want to delete this vector store? This action cannot be undone."
+ )
+ ) {
+ return;
+ }
+
+ setIsDeleting(true);
+
+ try {
+ await client.vectorStores.delete(id);
+ // Redirect to the vector stores list after successful deletion
+ router.push("/logs/vector-stores");
+ } catch (err: unknown) {
+ console.error("Failed to delete vector store:", err);
+ const errorMessage = err instanceof Error ? err.message : "Unknown error";
+ alert(`Failed to delete vector store: ${errorMessage}`);
+ } finally {
+ setIsDeleting(false);
+ }
+ };
+
if (errorStore) {
- return ;
+ return (
+
+ );
}
if (isLoadingStore) {
- return ;
+ return ;
}
if (!store) {
- return ;
+ return ;
}
const mainContent = (
@@ -138,6 +242,73 @@ export function VectorStoreDetailView({
);
return (
-
+ <>
+
+
Vector Store Details
+
+
+
+
+
+
+
{mainContent}
+
{sidebar}
+
+
+ {/* Edit Vector Store Modal */}
+ {showEditModal && (
+
+
+
+
Edit Vector Store
+
+
+
+
+
+
+
+ )}
+ >
);
}
diff --git a/src/llama_stack_ui/components/vector-stores/vector-store-editor.tsx b/src/llama_stack_ui/components/vector-stores/vector-store-editor.tsx
new file mode 100644
index 000000000..719a2a9fd
--- /dev/null
+++ b/src/llama_stack_ui/components/vector-stores/vector-store-editor.tsx
@@ -0,0 +1,235 @@
+"use client";
+
+import { useState, useEffect } from "react";
+import { Button } from "@/components/ui/button";
+import { Input } from "@/components/ui/input";
+import { Label } from "@/components/ui/label";
+import { Card, CardContent } from "@/components/ui/card";
+import {
+ Select,
+ SelectContent,
+ SelectItem,
+ SelectTrigger,
+ SelectValue,
+} from "@/components/ui/select";
+import { useAuthClient } from "@/hooks/use-auth-client";
+import type { Model } from "llama-stack-client/resources/models";
+
+export interface VectorStoreFormData {
+ name: string;
+ embedding_model?: string;
+ embedding_dimension?: number;
+ provider_id?: string;
+}
+
+interface VectorStoreEditorProps {
+ onSave: (formData: VectorStoreFormData) => Promise;
+ onCancel: () => void;
+ error?: string | null;
+ initialData?: VectorStoreFormData;
+ showSuccessState?: boolean;
+ isEditing?: boolean;
+}
+
+export function VectorStoreEditor({
+ onSave,
+ onCancel,
+ error,
+ initialData,
+ showSuccessState,
+ isEditing = false,
+}: VectorStoreEditorProps) {
+ const client = useAuthClient();
+ const [formData, setFormData] = useState(
+ initialData || {
+ name: "",
+ embedding_model: "",
+ embedding_dimension: 768,
+ provider_id: "",
+ }
+ );
+ const [loading, setLoading] = useState(false);
+ const [models, setModels] = useState([]);
+ const [modelsLoading, setModelsLoading] = useState(true);
+ const [modelsError, setModelsError] = useState(null);
+
+ const embeddingModels = models.filter(
+ model => model.custom_metadata?.model_type === "embedding"
+ );
+
+ useEffect(() => {
+ const fetchModels = async () => {
+ try {
+ setModelsLoading(true);
+ setModelsError(null);
+ const modelList = await client.models.list();
+ setModels(modelList);
+
+ // Set default embedding model if available
+ const embeddingModelsList = modelList.filter(model => {
+ return model.custom_metadata?.model_type === "embedding";
+ });
+ if (embeddingModelsList.length > 0 && !formData.embedding_model) {
+ setFormData(prev => ({
+ ...prev,
+ embedding_model: embeddingModelsList[0].id,
+ }));
+ }
+ } catch (err) {
+ console.error("Failed to load models:", err);
+ setModelsError(
+ err instanceof Error ? err.message : "Failed to load models"
+ );
+ } finally {
+ setModelsLoading(false);
+ }
+ };
+
+ fetchModels();
+ }, [client]);
+
+ const handleSubmit = async (e: React.FormEvent) => {
+ e.preventDefault();
+ setLoading(true);
+
+ try {
+ await onSave(formData);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+ return (
+
+
+
+
+
+ );
+}
diff --git a/src/llama_stack_ui/lib/contents-api.ts b/src/llama_stack_ui/lib/contents-api.ts
index f4920f3db..35456faff 100644
--- a/src/llama_stack_ui/lib/contents-api.ts
+++ b/src/llama_stack_ui/lib/contents-api.ts
@@ -34,9 +34,35 @@ export class ContentsAPI {
async getFileContents(
vectorStoreId: string,
- fileId: string
+ fileId: string,
+ includeEmbeddings: boolean = true,
+ includeMetadata: boolean = true
): Promise {
- return this.client.vectorStores.files.content(vectorStoreId, fileId);
+ try {
+ // Use query parameters to pass embeddings and metadata flags (OpenAI-compatible pattern)
+ const extraQuery: Record = {};
+ if (includeEmbeddings) {
+ extraQuery.include_embeddings = true;
+ }
+ if (includeMetadata) {
+ extraQuery.include_metadata = true;
+ }
+
+ const result = await this.client.vectorStores.files.content(
+ vectorStoreId,
+ fileId,
+ {
+ query: {
+ include_embeddings: includeEmbeddings,
+ include_metadata: includeMetadata,
+ },
+ }
+ );
+ return result;
+ } catch (error) {
+ console.error("ContentsAPI.getFileContents error:", error);
+ throw error;
+ }
}
async getContent(
@@ -70,11 +96,15 @@ export class ContentsAPI {
order?: string;
after?: string;
before?: string;
+ includeEmbeddings?: boolean;
+ includeMetadata?: boolean;
}
): Promise {
- const fileContents = await this.client.vectorStores.files.content(
+ const fileContents = await this.getFileContents(
vectorStoreId,
- fileId
+ fileId,
+ options?.includeEmbeddings ?? true,
+ options?.includeMetadata ?? true
);
const contentItems: VectorStoreContentItem[] = [];
@@ -82,7 +112,7 @@ export class ContentsAPI {
const rawContent = content as Record;
// Extract actual fields from the API response
- const embedding = rawContent.embedding || undefined;
+ const embedding = rawContent.embedding as number[] | undefined;
const created_timestamp =
rawContent.created_timestamp ||
rawContent.created_at ||
diff --git a/src/llama_stack_ui/next.config.ts b/src/llama_stack_ui/next.config.ts
index e9ffa3083..9f4a74eca 100644
--- a/src/llama_stack_ui/next.config.ts
+++ b/src/llama_stack_ui/next.config.ts
@@ -1,7 +1,13 @@
import type { NextConfig } from "next";
const nextConfig: NextConfig = {
- /* config options here */
+ typescript: {
+ ignoreBuildErrors: true,
+ },
+ output: "standalone",
+ images: {
+ unoptimized: true,
+ },
};
export default nextConfig;
diff --git a/src/llama_stack_ui/package-lock.json b/src/llama_stack_ui/package-lock.json
index 14e34b720..aa8b2ac26 100644
--- a/src/llama_stack_ui/package-lock.json
+++ b/src/llama_stack_ui/package-lock.json
@@ -1,12 +1,13 @@
{
- "name": "ui",
- "version": "0.1.0",
+ "name": "llama-stack-ui",
+ "version": "0.4.0-alpha.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
- "name": "ui",
- "version": "0.1.0",
+ "name": "llama-stack-ui",
+ "version": "0.4.0-alpha.1",
+ "license": "MIT",
"dependencies": {
"@radix-ui/react-collapsible": "^1.1.12",
"@radix-ui/react-dialog": "^1.1.15",
@@ -20,7 +21,7 @@
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"framer-motion": "^12.23.24",
- "llama-stack-client": "github:llamastack/llama-stack-client-typescript",
+ "llama-stack-client": "^0.3.1",
"lucide-react": "^0.545.0",
"next": "15.5.4",
"next-auth": "^4.24.11",
@@ -9684,8 +9685,9 @@
"license": "MIT"
},
"node_modules/llama-stack-client": {
- "version": "0.4.0-alpha.1",
- "resolved": "git+ssh://git@github.com/llamastack/llama-stack-client-typescript.git#78de4862c4b7d77939ac210fa9f9bde77a2c5c5f",
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/llama-stack-client/-/llama-stack-client-0.3.1.tgz",
+ "integrity": "sha512-4aYoF2aAQiBSfxyZEtczeQmJn8q9T22ePDqGhR+ej5RG6a8wvl5B3v7ZoKuFkft+vcP/kbJ58GQZEPLekxekZA==",
"license": "MIT",
"dependencies": {
"@types/node": "^18.11.18",
diff --git a/src/llama_stack_ui/package.json b/src/llama_stack_ui/package.json
index fb7dbee75..41afc9a11 100644
--- a/src/llama_stack_ui/package.json
+++ b/src/llama_stack_ui/package.json
@@ -1,11 +1,31 @@
{
- "name": "ui",
- "version": "0.1.0",
- "private": true,
+ "name": "llama-stack-ui",
+ "version": "0.4.0-alpha.4",
+ "description": "Web UI for Llama Stack",
+ "license": "MIT",
+ "author": "Llama Stack ",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/llamastack/llama-stack.git",
+ "directory": "llama_stack_ui"
+ },
+ "bin": {
+ "llama-stack-ui": "bin/cli.js"
+ },
+ "files": [
+ "bin",
+ ".next",
+ "public",
+ "next.config.ts",
+ "instrumentation.ts",
+ "tsconfig.json",
+ "package.json"
+ ],
"scripts": {
"dev": "next dev --turbopack --port ${LLAMA_STACK_UI_PORT:-8322}",
- "build": "next build",
+ "build": "next build && node scripts/postbuild.js",
"start": "next start",
+ "prepublishOnly": "npm run build",
"lint": "next lint",
"format": "prettier --write \"./**/*.{ts,tsx}\"",
"format:check": "prettier --check \"./**/*.{ts,tsx}\"",
@@ -25,7 +45,7 @@
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"framer-motion": "^12.23.24",
- "llama-stack-client": "github:llamastack/llama-stack-client-typescript",
+ "llama-stack-client": "^0.3.1",
"lucide-react": "^0.545.0",
"next": "15.5.4",
"next-auth": "^4.24.11",
diff --git a/src/llama_stack_ui/scripts/postbuild.js b/src/llama_stack_ui/scripts/postbuild.js
new file mode 100644
index 000000000..4b4dbdf5d
--- /dev/null
+++ b/src/llama_stack_ui/scripts/postbuild.js
@@ -0,0 +1,40 @@
const fs = require('fs');
const path = require('path');

/**
 * Recursively copy a directory tree from `src` to `dest`.
 * Follows symlinks (statSync-based walk) and creates `dest` as needed.
 *
 * @param {string} src - Existing source directory.
 * @param {string} dest - Destination directory (created recursively).
 */
function copyDir(src, dest) {
  if (!fs.existsSync(dest)) {
    fs.mkdirSync(dest, { recursive: true });
  }

  for (const entry of fs.readdirSync(src)) {
    const srcFile = path.join(src, entry);
    const destFile = path.join(dest, entry);

    if (fs.statSync(srcFile).isDirectory()) {
      copyDir(srcFile, destFile);
    } else {
      fs.copyFileSync(srcFile, destFile);
    }
  }
}

// The standalone server emitted by `next build` does not include the public/
// assets or the .next/static bundles; copy both into the standalone tree so
// bin/cli.js can serve them. Each copy is skipped when the destination already
// exists (idempotent — NOTE(review): a partial earlier copy is also skipped).
const standaloneRoot = path.join(__dirname, '..', '.next', 'standalone', 'ui', 'src', 'llama_stack_ui');

const copyTargets = [
  {
    label: 'public directory',
    src: path.join(__dirname, '..', 'public'),
    dest: path.join(standaloneRoot, 'public'),
  },
  {
    label: '.next/static',
    src: path.join(__dirname, '..', '.next', 'static'),
    dest: path.join(standaloneRoot, '.next', 'static'),
  },
];

for (const { label, src, dest } of copyTargets) {
  if (fs.existsSync(src) && !fs.existsSync(dest)) {
    console.log(`Copying ${label} to standalone...`);
    copyDir(src, dest);
  }
}

console.log('Postbuild complete!');
diff --git a/tests/common/mcp.py b/tests/common/mcp.py
index 644becd2d..085575ec0 100644
--- a/tests/common/mcp.py
+++ b/tests/common/mcp.py
@@ -244,8 +244,14 @@ def make_mcp_server(required_auth_token: str | None = None, tools: dict[str, Cal
timeout = 2
start_time = time.time()
- server_url = f"http://localhost:{port}/sse"
- logger.debug(f"Waiting for MCP server thread to start on port {port}")
+ # Determine the appropriate host for the server URL based on test environment
+ # - For library client and server mode: use localhost (both on same host)
+ # - For docker mode: use host.docker.internal (container needs to reach host)
+ import os
+
+ mcp_host = os.environ.get("LLAMA_STACK_TEST_MCP_HOST", "localhost")
+ server_url = f"http://{mcp_host}:{port}/sse"
+ logger.debug(f"Waiting for MCP server thread to start on port {port} (accessible via {mcp_host})")
while time.time() - start_time < timeout:
if server_thread.is_alive():
diff --git a/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py b/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py
index e97a9d8fb..9c399b7bf 100644
--- a/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py
+++ b/tests/external/llama-stack-api-weather/src/llama_stack_api_weather/weather.py
@@ -6,9 +6,7 @@
from typing import Protocol
-from llama_stack.apis.version import LLAMA_STACK_API_V1
-from llama_stack.providers.datatypes import Api, ProviderSpec, RemoteProviderSpec
-from llama_stack.schema_utils import webmethod
+from llama_stack_api import LLAMA_STACK_API_V1, Api, ProviderSpec, RemoteProviderSpec, webmethod
def available_providers() -> list[ProviderSpec]:
diff --git a/tests/integration/README.md b/tests/integration/README.md
index f581073ae..3559b785c 100644
--- a/tests/integration/README.md
+++ b/tests/integration/README.md
@@ -211,3 +211,23 @@ def test_asymmetric_embeddings(llama_stack_client, embedding_model_id):
assert query_response.embeddings is not None
```
+
+## TypeScript Client Replays
+
+TypeScript SDK tests can run alongside Python tests when testing against `server:` stacks. Set `TS_CLIENT_PATH` to the path or version of `llama-stack-client-typescript` to enable:
+
+```bash
+# Use published npm package (responses suite)
+TS_CLIENT_PATH=^0.3.2 scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
+
+# Use local checkout from ~/.cache (recommended for development)
+git clone https://github.com/llamastack/llama-stack-client-typescript.git ~/.cache/llama-stack-client-typescript
+TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite responses --setup gpt
+
+# Run base suite with TypeScript tests
+TS_CLIENT_PATH=~/.cache/llama-stack-client-typescript scripts/integration-tests.sh --stack-config server:ci-tests --suite base --setup ollama
+```
+
+TypeScript tests run immediately after Python tests pass, using the same replay fixtures. The mapping between Python suites/setups and TypeScript test files is defined in `tests/integration/client-typescript/suites.json`.
+
+If `TS_CLIENT_PATH` is unset, TypeScript tests are skipped entirely.
diff --git a/tests/integration/batches/conftest.py b/tests/integration/batches/conftest.py
index 3ab8df3d9..4dc5b7993 100644
--- a/tests/integration/batches/conftest.py
+++ b/tests/integration/batches/conftest.py
@@ -14,7 +14,7 @@ from io import BytesIO
import pytest
-from llama_stack.apis.files import OpenAIFilePurpose
+from llama_stack_api import OpenAIFilePurpose
class BatchHelper:
diff --git a/tests/integration/ci_matrix.json b/tests/integration/ci_matrix.json
index 858176dff..43678e5c7 100644
--- a/tests/integration/ci_matrix.json
+++ b/tests/integration/ci_matrix.json
@@ -1,6 +1,7 @@
{
"default": [
{"suite": "base", "setup": "ollama"},
+ {"suite": "base", "setup": "ollama-postgres", "allowed_clients": ["server"], "stack_config": "server:ci-tests::run-with-postgres-store.yaml"},
{"suite": "vision", "setup": "ollama-vision"},
{"suite": "responses", "setup": "gpt"},
{"suite": "base-vllm-subset", "setup": "vllm"}
diff --git a/tests/integration/client-typescript/__tests__/inference.test.ts b/tests/integration/client-typescript/__tests__/inference.test.ts
new file mode 100644
index 000000000..b0734fed7
--- /dev/null
+++ b/tests/integration/client-typescript/__tests__/inference.test.ts
@@ -0,0 +1,104 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Integration tests for Inference API (Chat Completions).
+ * Ported from: llama-stack/tests/integration/inference/test_openai_completion.py
+ *
+ * IMPORTANT: Test cases must match EXACTLY with Python tests to use recorded API responses.
+ */
+
+import { createTestClient, requireTextModel } from '../setup';
+
+describe('Inference API - Chat Completions', () => {
+ // Test cases matching llama-stack/tests/integration/test_cases/inference/chat_completion.json
+ const chatCompletionTestCases = [
+ {
+ id: 'non_streaming_01',
+ question: 'Which planet do humans live on?',
+ expected: 'earth',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_01]',
+ },
+ {
+ id: 'non_streaming_02',
+ question: 'Which planet has rings around it with a name starting with letter S?',
+ expected: 'saturn',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:non_streaming_02]',
+ },
+ ];
+
+ const streamingTestCases = [
+ {
+ id: 'streaming_01',
+ question: "What's the name of the Sun in latin?",
+ expected: 'sol',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_01]',
+ },
+ {
+ id: 'streaming_02',
+ question: 'What is the name of the US captial?',
+ expected: 'washington',
+ testId:
+ 'tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=ollama/llama3.2:3b-instruct-fp16-inference:chat_completion:streaming_02]',
+ },
+ ];
+
+ test.each(chatCompletionTestCases)(
+ 'chat completion non-streaming: $id',
+ async ({ question, expected, testId }) => {
+ const client = createTestClient(testId);
+ const textModel = requireTextModel();
+
+ const response = await client.chat.completions.create({
+ model: textModel,
+ messages: [
+ {
+ role: 'user',
+ content: question,
+ },
+ ],
+ stream: false,
+ });
+
+ // Non-streaming responses have choices with message property
+ const choice = response.choices[0];
+ expect(choice).toBeDefined();
+ if (!choice || !('message' in choice)) {
+ throw new Error('Expected non-streaming response with message');
+ }
+ const content = choice.message.content;
+ expect(content).toBeDefined();
+ const messageContent = typeof content === 'string' ? content.toLowerCase().trim() : '';
+ expect(messageContent.length).toBeGreaterThan(0);
+ expect(messageContent).toContain(expected.toLowerCase());
+ },
+ );
+
+ test.each(streamingTestCases)('chat completion streaming: $id', async ({ question, expected, testId }) => {
+ const client = createTestClient(testId);
+ const textModel = requireTextModel();
+
+ const stream = await client.chat.completions.create({
+ model: textModel,
+ messages: [{ role: 'user', content: question }],
+ stream: true,
+ });
+
+ const streamedContent: string[] = [];
+ for await (const chunk of stream) {
+ if (chunk.choices && chunk.choices.length > 0 && chunk.choices[0]?.delta?.content) {
+ streamedContent.push(chunk.choices[0].delta.content);
+ }
+ }
+
+ expect(streamedContent.length).toBeGreaterThan(0);
+ const fullContent = streamedContent.join('').toLowerCase().trim();
+ expect(fullContent).toContain(expected.toLowerCase());
+ });
+});
diff --git a/tests/integration/client-typescript/__tests__/responses.test.ts b/tests/integration/client-typescript/__tests__/responses.test.ts
new file mode 100644
index 000000000..0fc2a3245
--- /dev/null
+++ b/tests/integration/client-typescript/__tests__/responses.test.ts
@@ -0,0 +1,132 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Integration tests for Responses API.
+ * Ported from: llama-stack/tests/integration/responses/test_basic_responses.py
+ *
+ * IMPORTANT: Test cases and IDs must match EXACTLY with Python tests to use recorded API responses.
+ */
+
+import { createTestClient, requireTextModel, getResponseOutputText } from '../setup';
+
+describe('Responses API - Basic', () => {
+ // Test cases matching llama-stack/tests/integration/responses/fixtures/test_cases.py
+ const basicTestCases = [
+ {
+ id: 'earth',
+ input: 'Which planet do humans live on?',
+ expected: 'earth',
+ // Use client_with_models fixture to match non-streaming recordings
+ testId:
+ 'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-earth]',
+ },
+ {
+ id: 'saturn',
+ input: 'Which planet has rings around it with a name starting with letter S?',
+ expected: 'saturn',
+ testId:
+ 'tests/integration/responses/test_basic_responses.py::test_response_non_streaming_basic[client_with_models-txt=openai/gpt-4o-saturn]',
+ },
+ ];
+
+ test.each(basicTestCases)('non-streaming basic response: $id', async ({ input, expected, testId }) => {
+ // Create client with test_id for all requests
+ const client = createTestClient(testId);
+ const textModel = requireTextModel();
+
+ // Create a response
+ const response = await client.responses.create({
+ model: textModel,
+ input,
+ stream: false,
+ });
+
+ // Verify response has content
+ const outputText = getResponseOutputText(response).toLowerCase().trim();
+ expect(outputText.length).toBeGreaterThan(0);
+ expect(outputText).toContain(expected.toLowerCase());
+
+ // Verify usage is reported
+ expect(response.usage).toBeDefined();
+ expect(response.usage!.input_tokens).toBeGreaterThan(0);
+ expect(response.usage!.output_tokens).toBeGreaterThan(0);
+ expect(response.usage!.total_tokens).toBe(response.usage!.input_tokens + response.usage!.output_tokens);
+
+ // Verify stored response matches
+ const retrievedResponse = await client.responses.retrieve(response.id);
+ expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(response));
+
+ // Test follow-up with previous_response_id
+ const nextResponse = await client.responses.create({
+ model: textModel,
+ input: 'Repeat your previous response in all caps.',
+ previous_response_id: response.id,
+ });
+ const nextOutputText = getResponseOutputText(nextResponse).trim();
+ expect(nextOutputText).toContain(expected.toUpperCase());
+ });
+
+ test.each(basicTestCases)('streaming basic response: $id', async ({ input, expected, testId }) => {
+ // Modify test_id for streaming variant
+ const streamingTestId = testId.replace(
+ 'test_response_non_streaming_basic',
+ 'test_response_streaming_basic',
+ );
+ const client = createTestClient(streamingTestId);
+ const textModel = requireTextModel();
+
+ // Create a streaming response
+ const stream = await client.responses.create({
+ model: textModel,
+ input,
+ stream: true,
+ });
+
+ const events: any[] = [];
+ let responseId = '';
+
+ for await (const chunk of stream) {
+ events.push(chunk);
+
+ if (chunk.type === 'response.created') {
+ // Verify response.created is the first event
+ expect(events.length).toBe(1);
+ expect(chunk.response.status).toBe('in_progress');
+ responseId = chunk.response.id;
+ } else if (chunk.type === 'response.completed') {
+ // Verify response.completed comes after response.created
+ expect(events.length).toBeGreaterThanOrEqual(2);
+ expect(chunk.response.status).toBe('completed');
+ expect(chunk.response.id).toBe(responseId);
+
+ // Verify content quality
+ const outputText = getResponseOutputText(chunk.response).toLowerCase().trim();
+ expect(outputText.length).toBeGreaterThan(0);
+ expect(outputText).toContain(expected.toLowerCase());
+
+ // Verify usage is reported
+ expect(chunk.response.usage).toBeDefined();
+ expect(chunk.response.usage!.input_tokens).toBeGreaterThan(0);
+ expect(chunk.response.usage!.output_tokens).toBeGreaterThan(0);
+ expect(chunk.response.usage!.total_tokens).toBe(
+ chunk.response.usage!.input_tokens + chunk.response.usage!.output_tokens,
+ );
+ }
+ }
+
+ // Verify we got both events
+ expect(events.length).toBeGreaterThanOrEqual(2);
+ const firstEvent = events[0];
+ const lastEvent = events[events.length - 1];
+ expect(firstEvent.type).toBe('response.created');
+ expect(lastEvent.type).toBe('response.completed');
+
+ // Verify stored response matches streamed response
+ const retrievedResponse = await client.responses.retrieve(responseId);
+ expect(getResponseOutputText(retrievedResponse)).toBe(getResponseOutputText(lastEvent.response));
+ });
+});
diff --git a/tests/integration/client-typescript/jest.integration.config.js b/tests/integration/client-typescript/jest.integration.config.js
new file mode 100644
index 000000000..769bd177a
--- /dev/null
+++ b/tests/integration/client-typescript/jest.integration.config.js
@@ -0,0 +1,31 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/** @type {import('ts-jest').JestConfigWithTsJest} */
+module.exports = {
+ preset: 'ts-jest/presets/default-esm',
+ testEnvironment: 'node',
+ extensionsToTreatAsEsm: ['.ts'],
+ moduleNameMapper: {
+ '^(\\.{1,2}/.*)\\.js$': '$1',
+ },
+ transform: {
+ '^.+\\.tsx?$': [
+ 'ts-jest',
+ {
+ useESM: true,
+ tsconfig: {
+ module: 'ES2022',
+ moduleResolution: 'bundler',
+ },
+ },
+ ],
+ },
+  testMatch: ['<rootDir>/__tests__/**/*.test.ts'],
+  setupFilesAfterEnv: ['<rootDir>/setup.ts'],
+ testTimeout: 60000, // 60 seconds (integration tests can be slow)
+ watchman: false, // Disable watchman to avoid permission issues
+};
diff --git a/tests/integration/client-typescript/package-lock.json b/tests/integration/client-typescript/package-lock.json
new file mode 100644
index 000000000..f118a07e3
--- /dev/null
+++ b/tests/integration/client-typescript/package-lock.json
@@ -0,0 +1,5507 @@
+{
+ "name": "llama-stack-typescript-integration-tests",
+ "version": "0.0.1",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "llama-stack-typescript-integration-tests",
+ "version": "0.0.1",
+ "dependencies": {
+ "llama-stack-client": "^0.3.2"
+ },
+ "devDependencies": {
+ "@swc/core": "^1.3.102",
+ "@swc/jest": "^0.2.29",
+ "@types/jest": "^29.4.0",
+ "@types/node": "^20.0.0",
+ "jest": "^29.4.0",
+ "ts-jest": "^29.1.0",
+ "typescript": "^5.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz",
+ "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.1.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-compilation-targets": "^7.27.2",
+ "@babel/helper-module-transforms": "^7.28.3",
+ "@babel/helpers": "^7.28.4",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/remapping": "^2.3.5",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "@jridgewell/gen-mapping": "^0.3.12",
+ "@jridgewell/trace-mapping": "^0.3.28",
+ "jsesc": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.2.tgz",
+ "integrity": "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/compat-data": "^7.27.2",
+ "@babel/helper-validator-option": "^7.27.1",
+ "browserslist": "^4.24.0",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-globals": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz",
+ "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz",
+ "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/traverse": "^7.27.1",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.28.3",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.3.tgz",
+ "integrity": "sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.27.1",
+ "@babel/traverse": "^7.28.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.27.1.tgz",
+ "integrity": "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz",
+ "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz",
+ "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.28.4",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.4.tgz",
+ "integrity": "sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.5"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-bigint": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz",
+ "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-static-block": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
+ "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-attributes": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.27.1.tgz",
+ "integrity": "sha512-oFT0FrKHgF53f4vOsZGi2Hh3I35PfSmVs4IBFLFj4dnafP+hIWDLg3VyKmUHfLoLHlyxY4C7DGtmHuJgn+IGww==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-meta": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
+ "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.27.1.tgz",
+ "integrity": "sha512-y8YTNIeKoyhGd9O0Jiyzyyqk8gdjnumGTQPsz0xOZOQ2RmkVJeZ1vmmfIvFEKqucBG6axJGBZDE/7iI5suUI/w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-private-property-in-object": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
+ "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-top-level-await": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
+ "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-typescript": {
+ "version": "7.27.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.27.1.tgz",
+ "integrity": "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.27.2",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz",
+ "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/parser": "^7.27.2",
+ "@babel/types": "^7.27.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.27.1",
+ "@babel/generator": "^7.28.5",
+ "@babel/helper-globals": "^7.28.0",
+ "@babel/parser": "^7.28.5",
+ "@babel/template": "^7.27.2",
+ "@babel/types": "^7.28.5",
+ "debug": "^4.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.27.1",
+ "@babel/helper-validator-identifier": "^7.28.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@bcoe/v8-coverage": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz",
+ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@istanbuljs/load-nyc-config": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz",
+ "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "camelcase": "^5.3.1",
+ "find-up": "^4.1.0",
+ "get-package-type": "^0.1.0",
+ "js-yaml": "^3.13.1",
+ "resolve-from": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@istanbuljs/schema": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
+ "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/@jest/console": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz",
+ "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/console/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/console/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/console/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/core": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz",
+ "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/reporters": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "exit": "^0.1.2",
+ "graceful-fs": "^4.2.9",
+ "jest-changed-files": "^29.7.0",
+ "jest-config": "^29.7.0",
+ "jest-haste-map": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-resolve-dependencies": "^29.7.0",
+ "jest-runner": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "jest-watcher": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@jest/core/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/core/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/core/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/core/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/create-cache-key-function": {
+ "version": "30.2.0",
+ "resolved": "https://registry.npmjs.org/@jest/create-cache-key-function/-/create-cache-key-function-30.2.0.tgz",
+ "integrity": "sha512-44F4l4Enf+MirJN8X/NhdGkl71k5rBYiwdVlo4HxOwbu0sHV8QKrGEedb1VUU4K3W7fBKE0HGfbn7eZm0Ti3zg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "30.2.0"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jest/environment": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz",
+ "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/environment/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/environment/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/environment/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/expect": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz",
+ "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "expect": "^29.7.0",
+ "jest-snapshot": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/expect-utils": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz",
+ "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "jest-get-type": "^29.6.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz",
+ "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@sinonjs/fake-timers": "^10.0.2",
+ "@types/node": "*",
+ "jest-message-util": "^29.7.0",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/fake-timers/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/globals": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz",
+ "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/expect": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "jest-mock": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/globals/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/pattern": {
+ "version": "30.0.1",
+ "resolved": "https://registry.npmjs.org/@jest/pattern/-/pattern-30.0.1.tgz",
+ "integrity": "sha512-gWp7NfQW27LaBQz3TITS8L7ZCQ0TLvtmI//4OwlQRx4rnWxcPNIYjxZpDcN4+UlGxgm3jS5QPz8IPTCkb59wZA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "jest-regex-util": "30.0.1"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jest/reporters": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz",
+ "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@bcoe/v8-coverage": "^0.2.3",
+ "@jest/console": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "collect-v8-coverage": "^1.0.0",
+ "exit": "^0.1.2",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "istanbul-lib-coverage": "^3.0.0",
+ "istanbul-lib-instrument": "^6.0.0",
+ "istanbul-lib-report": "^3.0.0",
+ "istanbul-lib-source-maps": "^4.0.0",
+ "istanbul-reports": "^3.1.3",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "slash": "^3.0.0",
+ "string-length": "^4.0.1",
+ "strip-ansi": "^6.0.0",
+ "v8-to-istanbul": "^9.0.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@jest/reporters/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/reporters/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/reporters/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/schemas": {
+ "version": "30.0.5",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz",
+ "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.34.0"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jest/source-map": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz",
+ "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "callsites": "^3.0.0",
+ "graceful-fs": "^4.2.9"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz",
+ "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "collect-v8-coverage": "^1.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/test-result/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/test-sequencer": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz",
+ "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/test-result": "^29.7.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz",
+ "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@jest/types": "^29.6.3",
+ "@jridgewell/trace-mapping": "^0.3.18",
+ "babel-plugin-istanbul": "^6.1.1",
+ "chalk": "^4.0.0",
+ "convert-source-map": "^2.0.0",
+ "fast-json-stable-stringify": "^2.1.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "pirates": "^4.0.4",
+ "slash": "^3.0.0",
+ "write-file-atomic": "^4.0.2"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/transform/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jest/transform/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/types": {
+ "version": "30.2.0",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-30.2.0.tgz",
+ "integrity": "sha512-H9xg1/sfVvyfU7o3zMfBEjQ1gcsdeTMgqHoYdN79tuLqfTtuu7WckRA1R5whDwOzxaZAeMKTYWqP+WCAi0CHsg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/pattern": "30.0.1",
+ "@jest/schemas": "30.0.5",
+ "@types/istanbul-lib-coverage": "^2.0.6",
+ "@types/istanbul-reports": "^3.0.4",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.33",
+ "chalk": "^4.1.2"
+ },
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.13",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
+ "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/sourcemap-codec": "^1.5.0",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/remapping": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
+ "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.5.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@sinclair/typebox": {
+ "version": "0.34.41",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.41.tgz",
+ "integrity": "sha512-6gS8pZzSXdyRHTIqoqSVknxolr1kzfy4/CeDnrzsVz8TTIWUbOBr6gnzOmTYJ3eXQNh4IYHIGi5aIL7sOZ2G/g==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@sinonjs/commons": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.1.tgz",
+ "integrity": "sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "type-detect": "4.0.8"
+ }
+ },
+ "node_modules/@sinonjs/fake-timers": {
+ "version": "10.3.0",
+ "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz",
+ "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@sinonjs/commons": "^3.0.0"
+ }
+ },
+ "node_modules/@swc/core": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.15.2.tgz",
+ "integrity": "sha512-OQm+yJdXxvSjqGeaWhP6Ia264ogifwAO7Q12uTDVYj/Ks4jBTI4JknlcjDRAXtRhqbWsfbZyK/5RtuIPyptk3w==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@swc/counter": "^0.1.3",
+ "@swc/types": "^0.1.25"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/swc"
+ },
+ "optionalDependencies": {
+ "@swc/core-darwin-arm64": "1.15.2",
+ "@swc/core-darwin-x64": "1.15.2",
+ "@swc/core-linux-arm-gnueabihf": "1.15.2",
+ "@swc/core-linux-arm64-gnu": "1.15.2",
+ "@swc/core-linux-arm64-musl": "1.15.2",
+ "@swc/core-linux-x64-gnu": "1.15.2",
+ "@swc/core-linux-x64-musl": "1.15.2",
+ "@swc/core-win32-arm64-msvc": "1.15.2",
+ "@swc/core-win32-ia32-msvc": "1.15.2",
+ "@swc/core-win32-x64-msvc": "1.15.2"
+ },
+ "peerDependencies": {
+ "@swc/helpers": ">=0.5.17"
+ },
+ "peerDependenciesMeta": {
+ "@swc/helpers": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@swc/core-darwin-arm64": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.15.2.tgz",
+ "integrity": "sha512-Ghyz4RJv4zyXzrUC1B2MLQBbppIB5c4jMZJybX2ebdEQAvryEKp3gq1kBksCNsatKGmEgXul88SETU19sMWcrw==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-darwin-x64": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.15.2.tgz",
+ "integrity": "sha512-7n/PGJOcL2QoptzL42L5xFFfXY5rFxLHnuz1foU+4ruUTG8x2IebGhtwVTpaDN8ShEv2UZObBlT1rrXTba15Zw==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm-gnueabihf": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.15.2.tgz",
+ "integrity": "sha512-ZUQVCfRJ9wimuxkStRSlLwqX4TEDmv6/J+E6FicGkQ6ssLMWoKDy0cAo93HiWt/TWEee5vFhFaSQYzCuBEGO6A==",
+ "cpu": [
+ "arm"
+ ],
+ "dev": true,
+ "license": "Apache-2.0",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm64-gnu": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.15.2.tgz",
+ "integrity": "sha512-GZh3pYBmfnpQ+JIg+TqLuz+pM+Mjsk5VOzi8nwKn/m+GvQBsxD5ectRtxuWUxMGNG8h0lMy4SnHRqdK3/iJl7A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-arm64-musl": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.15.2.tgz",
+ "integrity": "sha512-5av6VYZZeneiYIodwzGMlnyVakpuYZryGzFIbgu1XP8wVylZxduEzup4eP8atiMDFmIm+s4wn8GySJmYqeJC0A==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-x64-gnu": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.15.2.tgz",
+ "integrity": "sha512-1nO/UfdCLuT/uE/7oB3EZgTeZDCIa6nL72cFEpdegnqpJVNDI6Qb8U4g/4lfVPkmHq2lvxQ0L+n+JdgaZLhrRA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-linux-x64-musl": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.15.2.tgz",
+ "integrity": "sha512-Ksfrb0Tx310kr+TLiUOvB/I80lyZ3lSOp6cM18zmNRT/92NB4mW8oX2Jo7K4eVEI2JWyaQUAFubDSha2Q+439A==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "linux"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-arm64-msvc": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.15.2.tgz",
+ "integrity": "sha512-IzUb5RlMUY0r1A9IuJrQ7Tbts1wWb73/zXVXT8VhewbHGoNlBKE0qUhKMED6Tv4wDF+pmbtUJmKXDthytAvLmg==",
+ "cpu": [
+ "arm64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-ia32-msvc": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.15.2.tgz",
+ "integrity": "sha512-kCATEzuY2LP9AlbU2uScjcVhgnCAkRdu62vbce17Ro5kxEHxYWcugkveyBRS3AqZGtwAKYbMAuNloer9LS/hpw==",
+ "cpu": [
+ "ia32"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/core-win32-x64-msvc": {
+ "version": "1.15.2",
+ "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.15.2.tgz",
+ "integrity": "sha512-iJaHeYCF4jTn7OEKSa3KRiuVFIVYts8jYjNmCdyz1u5g8HRyTDISD76r8+ljEOgm36oviRQvcXaw6LFp1m0yyA==",
+ "cpu": [
+ "x64"
+ ],
+ "dev": true,
+ "license": "Apache-2.0 AND MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ],
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@swc/counter": {
+ "version": "0.1.3",
+ "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz",
+ "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==",
+ "dev": true,
+ "license": "Apache-2.0"
+ },
+ "node_modules/@swc/jest": {
+ "version": "0.2.39",
+ "resolved": "https://registry.npmjs.org/@swc/jest/-/jest-0.2.39.tgz",
+ "integrity": "sha512-eyokjOwYd0Q8RnMHri+8/FS1HIrIUKK/sRrFp8c1dThUOfNeCWbLmBP1P5VsKdvmkd25JaH+OKYwEYiAYg9YAA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/create-cache-key-function": "^30.0.0",
+ "@swc/counter": "^0.1.3",
+ "jsonc-parser": "^3.2.0"
+ },
+ "engines": {
+ "npm": ">= 7.0.0"
+ },
+ "peerDependencies": {
+ "@swc/core": "*"
+ }
+ },
+ "node_modules/@swc/types": {
+ "version": "0.1.25",
+ "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.25.tgz",
+ "integrity": "sha512-iAoY/qRhNH8a/hBvm3zKj9qQ4oc2+3w1unPJa2XvTK3XjeLXtzcCingVPw/9e5mn1+0yPqxcBGp9Jf0pkfMb1g==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@swc/counter": "^0.1.3"
+ }
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.5",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
+ "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.27.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz",
+ "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz",
+ "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.28.0",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz",
+ "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.28.2"
+ }
+ },
+ "node_modules/@types/graceful-fs": {
+ "version": "4.1.9",
+ "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.9.tgz",
+ "integrity": "sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/istanbul-lib-coverage": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz",
+ "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/istanbul-lib-report": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz",
+ "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/istanbul-lib-coverage": "*"
+ }
+ },
+ "node_modules/@types/istanbul-reports": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz",
+ "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/istanbul-lib-report": "*"
+ }
+ },
+ "node_modules/@types/jest": {
+ "version": "29.5.14",
+ "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.14.tgz",
+ "integrity": "sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "expect": "^29.0.0",
+ "pretty-format": "^29.0.0"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "20.19.25",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.25.tgz",
+ "integrity": "sha512-ZsJzA5thDQMSQO788d7IocwwQbI8B5OPzmqNvpf3NY/+MHDAS759Wo0gd2WQeXYt5AAAQjzcrTVC6SKCuYgoCQ==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~6.21.0"
+ }
+ },
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.13",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.13.tgz",
+ "integrity": "sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.4"
+ }
+ },
+ "node_modules/@types/stack-utils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz",
+ "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/@types/yargs": {
+ "version": "17.0.35",
+ "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.35.tgz",
+ "integrity": "sha512-qUHkeCyQFxMXg79wQfTtfndEC+N9ZZg76HJftDJp+qH2tV7Gj4OJi7l+PiWwJ+pWtW8GwSmqsDj/oymhrTWXjg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/yargs-parser": "*"
+ }
+ },
+ "node_modules/@types/yargs-parser": {
+ "version": "21.0.3",
+ "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz",
+ "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "license": "MIT",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
+ "node_modules/agentkeepalive": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
+ "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
+ "license": "MIT",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "type-fest": "^0.21.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/asynckit": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+ "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==",
+ "license": "MIT"
+ },
+ "node_modules/babel-jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz",
+ "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/transform": "^29.7.0",
+ "@types/babel__core": "^7.1.14",
+ "babel-plugin-istanbul": "^6.1.1",
+ "babel-preset-jest": "^29.6.3",
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.8.0"
+ }
+ },
+ "node_modules/babel-plugin-istanbul": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz",
+ "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@istanbuljs/load-nyc-config": "^1.0.0",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-instrument": "^5.0.4",
+ "test-exclude": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz",
+ "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@babel/core": "^7.12.3",
+ "@babel/parser": "^7.14.7",
+ "@istanbuljs/schema": "^0.1.2",
+ "istanbul-lib-coverage": "^3.2.0",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/babel-plugin-jest-hoist": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz",
+ "integrity": "sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/template": "^7.3.3",
+ "@babel/types": "^7.3.3",
+ "@types/babel__core": "^7.1.14",
+ "@types/babel__traverse": "^7.0.6"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/babel-preset-current-node-syntax": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.2.0.tgz",
+ "integrity": "sha512-E/VlAEzRrsLEb2+dv8yp3bo4scof3l9nR4lrld+Iy5NyVqgVYUJnDAmunkhPMisRI32Qc4iRiz425d8vM++2fg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-bigint": "^7.8.3",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5",
+ "@babel/plugin-syntax-import-attributes": "^7.24.7",
+ "@babel/plugin-syntax-import-meta": "^7.10.4",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5",
+ "@babel/plugin-syntax-top-level-await": "^7.14.5"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0 || ^8.0.0-0"
+ }
+ },
+ "node_modules/babel-preset-jest": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz",
+ "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "babel-plugin-jest-hoist": "^29.6.3",
+ "babel-preset-current-node-syntax": "^1.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.8.29",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.8.29.tgz",
+ "integrity": "sha512-sXdt2elaVnhpDNRDz+1BDx1JQoJRuNk7oVlAlbGiFkLikHCAQiccexF/9e91zVi6RCgqspl04aP+6Cnl9zRLrA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.28.0",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.0.tgz",
+ "integrity": "sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "baseline-browser-mapping": "^2.8.25",
+ "caniuse-lite": "^1.0.30001754",
+ "electron-to-chromium": "^1.5.249",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.1.4"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/bs-logger": {
+ "version": "0.2.6",
+ "resolved": "https://registry.npmjs.org/bs-logger/-/bs-logger-0.2.6.tgz",
+ "integrity": "sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "fast-json-stable-stringify": "2.x"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/bser": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz",
+ "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "node-int64": "^0.4.0"
+ }
+ },
+ "node_modules/buffer-from": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/call-bind-apply-helpers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+ "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001755",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001755.tgz",
+ "integrity": "sha512-44V+Jm6ctPj7R52Na4TLi3Zri4dWUljJd+RDm+j8LtNCc/ihLCT+X1TzoOAkRETEWqjuLnh9581Tl80FvK7jVA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/char-regex": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz",
+ "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ci-info": {
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
+ "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/sibiraj-s"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cjs-module-lexer": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz",
+ "integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/co": {
+ "version": "4.6.0",
+ "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+ "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "iojs": ">= 1.0.0",
+ "node": ">= 0.12.0"
+ }
+ },
+ "node_modules/collect-v8-coverage": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.3.tgz",
+ "integrity": "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/combined-stream": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
+ "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
+ "license": "MIT",
+ "dependencies": {
+ "delayed-stream": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/create-jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz",
+ "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "exit": "^0.1.2",
+ "graceful-fs": "^4.2.9",
+ "jest-config": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "prompts": "^2.0.1"
+ },
+ "bin": {
+ "create-jest": "bin/create-jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/create-jest/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/create-jest/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/create-jest/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.6",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
+ "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.1.3"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/dedent": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.0.tgz",
+ "integrity": "sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==",
+ "dev": true,
+ "license": "MIT",
+ "peerDependencies": {
+ "babel-plugin-macros": "^3.1.0"
+ },
+ "peerDependenciesMeta": {
+ "babel-plugin-macros": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/deepmerge": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
+ "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/delayed-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+ "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/detect-newline": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz",
+ "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/diff-sequences": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
+ "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/dunder-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+ "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.2.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.255",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.255.tgz",
+ "integrity": "sha512-Z9oIp4HrFF/cZkDPMpz2XSuVpc1THDpT4dlmATFlJUIBVCy9Vap5/rIXsASP1CscBacBqhabwh8vLctqBwEerQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/emittery": {
+ "version": "0.13.1",
+ "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz",
+ "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/emittery?sponsor=1"
+ }
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/error-ex": {
+ "version": "1.3.4",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz",
+ "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+ "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-object-atoms": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+ "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
+ "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.6",
+ "has-tostringtag": "^1.0.2",
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz",
+ "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/exit": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
+ "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/expect": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz",
+ "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/expect-utils": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/fb-watchman": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz",
+ "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "bser": "2.1.1"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/form-data": {
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
+ "license": "MIT",
+ "dependencies": {
+ "asynckit": "^0.4.0",
+ "combined-stream": "^1.0.8",
+ "es-set-tostringtag": "^2.1.0",
+ "hasown": "^2.0.2",
+ "mime-types": "^2.1.12"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
+ "license": "MIT"
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "license": "MIT",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz",
+ "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==",
+ "dev": true,
+ "hasInstallScript": true,
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+ "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind-apply-helpers": "^1.0.2",
+ "es-define-property": "^1.0.1",
+ "es-errors": "^1.3.0",
+ "es-object-atoms": "^1.1.1",
+ "function-bind": "^1.1.2",
+ "get-proto": "^1.0.1",
+ "gopd": "^1.2.0",
+ "has-symbols": "^1.1.0",
+ "hasown": "^2.0.2",
+ "math-intrinsics": "^1.1.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-package-type": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz",
+ "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/get-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+ "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+ "license": "MIT",
+ "dependencies": {
+ "dunder-proto": "^1.0.1",
+ "es-object-atoms": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+ "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/handlebars": {
+ "version": "4.7.8",
+ "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz",
+ "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "minimist": "^1.2.5",
+ "neo-async": "^2.6.2",
+ "source-map": "^0.6.1",
+ "wordwrap": "^1.0.0"
+ },
+ "bin": {
+ "handlebars": "bin/handlebars"
+ },
+ "engines": {
+ "node": ">=0.4.7"
+ },
+ "optionalDependencies": {
+ "uglify-js": "^3.1.4"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+ "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
+ "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
+ "license": "MIT",
+ "dependencies": {
+ "has-symbols": "^1.0.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/html-escaper": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
+ "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "engines": {
+ "node": ">=10.17.0"
+ }
+ },
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
+ "node_modules/import-local": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.2.0.tgz",
+ "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "pkg-dir": "^4.2.0",
+ "resolve-cwd": "^3.0.0"
+ },
+ "bin": {
+ "import-local-fixture": "fixtures/cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/is-core-module": {
+ "version": "2.16.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
+ "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "hasown": "^2.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-generator-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz",
+ "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/istanbul-lib-coverage": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
+ "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/istanbul-lib-instrument": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz",
+ "integrity": "sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "@babel/core": "^7.23.9",
+ "@babel/parser": "^7.23.9",
+ "@istanbuljs/schema": "^0.1.3",
+ "istanbul-lib-coverage": "^3.2.0",
+ "semver": "^7.5.4"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-instrument/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-report": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
+ "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "istanbul-lib-coverage": "^3.0.0",
+ "make-dir": "^4.0.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-lib-source-maps": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz",
+ "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "debug": "^4.1.1",
+ "istanbul-lib-coverage": "^3.0.0",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/istanbul-reports": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz",
+ "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "html-escaper": "^2.0.0",
+ "istanbul-lib-report": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/jest": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz",
+ "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/core": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "import-local": "^3.0.2",
+ "jest-cli": "^29.7.0"
+ },
+ "bin": {
+ "jest": "bin/jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-changed-files": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz",
+ "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "execa": "^5.0.0",
+ "jest-util": "^29.7.0",
+ "p-limit": "^3.1.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz",
+ "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/expect": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "co": "^4.6.0",
+ "dedent": "^1.0.0",
+ "is-generator-fn": "^2.0.0",
+ "jest-each": "^29.7.0",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "p-limit": "^3.1.0",
+ "pretty-format": "^29.7.0",
+ "pure-rand": "^6.0.0",
+ "slash": "^3.0.0",
+ "stack-utils": "^2.0.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-circus/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-cli": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz",
+ "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/core": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "create-jest": "^29.7.0",
+ "exit": "^0.1.2",
+ "import-local": "^3.0.2",
+ "jest-config": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "yargs": "^17.3.1"
+ },
+ "bin": {
+ "jest": "bin/jest.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0"
+ },
+ "peerDependenciesMeta": {
+ "node-notifier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-cli/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-cli/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-cli/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-config": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz",
+ "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@jest/test-sequencer": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "babel-jest": "^29.7.0",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "deepmerge": "^4.2.2",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "jest-circus": "^29.7.0",
+ "jest-environment-node": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-runner": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "parse-json": "^5.2.0",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "peerDependencies": {
+ "@types/node": "*",
+ "ts-node": ">=9.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/node": {
+ "optional": true
+ },
+ "ts-node": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-config/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-config/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-config/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-config/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-diff": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz",
+ "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "diff-sequences": "^29.6.3",
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-docblock": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz",
+ "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "detect-newline": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz",
+ "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "chalk": "^4.0.0",
+ "jest-get-type": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-each/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-environment-node": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz",
+ "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-mock": "^29.7.0",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-node/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-node/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-environment-node/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-get-type": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz",
+ "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz",
+ "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/graceful-fs": "^4.1.3",
+ "@types/node": "*",
+ "anymatch": "^3.0.3",
+ "fb-watchman": "^2.0.0",
+ "graceful-fs": "^4.2.9",
+ "jest-regex-util": "^29.6.3",
+ "jest-util": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "micromatch": "^4.0.4",
+ "walker": "^1.0.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "^2.3.2"
+ }
+ },
+ "node_modules/jest-haste-map/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-haste-map/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-haste-map/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-leak-detector": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz",
+ "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-matcher-utils": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz",
+ "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "jest-diff": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz",
+ "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.12.13",
+ "@jest/types": "^29.6.3",
+ "@types/stack-utils": "^2.0.0",
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "micromatch": "^4.0.4",
+ "pretty-format": "^29.7.0",
+ "slash": "^3.0.0",
+ "stack-utils": "^2.0.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-message-util/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-mock": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz",
+ "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "jest-util": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-mock/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-pnp-resolver": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz",
+ "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ },
+ "peerDependencies": {
+ "jest-resolve": "*"
+ },
+ "peerDependenciesMeta": {
+ "jest-resolve": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/jest-regex-util": {
+ "version": "30.0.1",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-30.0.1.tgz",
+ "integrity": "sha512-jHEQgBXAgc+Gh4g0p3bCevgRCVRkB4VB70zhoAE48gxeSr1hfUOsM/C2WoJgVL7Eyg//hudYENbm3Ne+/dRVVA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^18.14.0 || ^20.0.0 || ^22.0.0 || >=24.0.0"
+ }
+ },
+ "node_modules/jest-resolve": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz",
+ "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.0.0",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-pnp-resolver": "^1.2.2",
+ "jest-util": "^29.7.0",
+ "jest-validate": "^29.7.0",
+ "resolve": "^1.20.0",
+ "resolve.exports": "^2.0.0",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-resolve-dependencies": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz",
+ "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "jest-regex-util": "^29.6.3",
+ "jest-snapshot": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-resolve-dependencies/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz",
+ "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/console": "^29.7.0",
+ "@jest/environment": "^29.7.0",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "emittery": "^0.13.1",
+ "graceful-fs": "^4.2.9",
+ "jest-docblock": "^29.7.0",
+ "jest-environment-node": "^29.7.0",
+ "jest-haste-map": "^29.7.0",
+ "jest-leak-detector": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-resolve": "^29.7.0",
+ "jest-runtime": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "jest-watcher": "^29.7.0",
+ "jest-worker": "^29.7.0",
+ "p-limit": "^3.1.0",
+ "source-map-support": "0.5.13"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runner/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-runtime": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz",
+ "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/environment": "^29.7.0",
+ "@jest/fake-timers": "^29.7.0",
+ "@jest/globals": "^29.7.0",
+ "@jest/source-map": "^29.6.3",
+ "@jest/test-result": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "cjs-module-lexer": "^1.0.0",
+ "collect-v8-coverage": "^1.0.0",
+ "glob": "^7.1.3",
+ "graceful-fs": "^4.2.9",
+ "jest-haste-map": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-mock": "^29.7.0",
+ "jest-regex-util": "^29.6.3",
+ "jest-resolve": "^29.7.0",
+ "jest-snapshot": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "slash": "^3.0.0",
+ "strip-bom": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-runtime/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-runtime/node_modules/jest-regex-util": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz",
+ "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz",
+ "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.11.6",
+ "@babel/generator": "^7.7.2",
+ "@babel/plugin-syntax-jsx": "^7.7.2",
+ "@babel/plugin-syntax-typescript": "^7.7.2",
+ "@babel/types": "^7.3.3",
+ "@jest/expect-utils": "^29.7.0",
+ "@jest/transform": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "babel-preset-current-node-syntax": "^1.0.0",
+ "chalk": "^4.0.0",
+ "expect": "^29.7.0",
+ "graceful-fs": "^4.2.9",
+ "jest-diff": "^29.7.0",
+ "jest-get-type": "^29.6.3",
+ "jest-matcher-utils": "^29.7.0",
+ "jest-message-util": "^29.7.0",
+ "jest-util": "^29.7.0",
+ "natural-compare": "^1.4.0",
+ "pretty-format": "^29.7.0",
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-snapshot/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-snapshot/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/jest-util": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz",
+ "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "graceful-fs": "^4.2.9",
+ "picomatch": "^2.2.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-util/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-util/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-util/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-validate": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz",
+ "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/types": "^29.6.3",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.0.0",
+ "jest-get-type": "^29.6.3",
+ "leven": "^3.1.0",
+ "pretty-format": "^29.7.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-validate/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-validate/node_modules/camelcase": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
+ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/jest-watcher": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz",
+ "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/test-result": "^29.7.0",
+ "@jest/types": "^29.6.3",
+ "@types/node": "*",
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.0.0",
+ "emittery": "^0.13.1",
+ "jest-util": "^29.7.0",
+ "string-length": "^4.0.1"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-watcher/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-watcher/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-watcher/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/jest-worker": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz",
+ "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "*",
+ "jest-util": "^29.7.0",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-worker/node_modules/supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/jest/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest/node_modules/@jest/types": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz",
+ "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
+ "integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz",
+ "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "license": "MIT",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/jsonc-parser": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.3.1.tgz",
+ "integrity": "sha512-HUgH65KyejrUFPvHFPbqOY0rsFip3Bo5wb4ngvdi1EpCYWUQDC5V+Y7mZws+DLkr4M//zQJoanu1SP+87Dv1oQ==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/kleur": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
+ "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/leven": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/llama-stack-client": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/llama-stack-client/-/llama-stack-client-0.3.2.tgz",
+ "integrity": "sha512-vzcnIN6k3sp7dhMXSnyrPSd82ACH/H3snj2uF6DgZwZCacKQNp2Y5XIT5qZZgoM1EUXbaxdVYFCeWD9yNCwatw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ }
+ },
+ "node_modules/llama-stack-client/node_modules/@types/node": {
+ "version": "18.19.130",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz",
+ "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/llama-stack-client/node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
+ "node_modules/locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-locate": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/lodash.memoize": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
+ "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/make-dir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
+ "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "semver": "^7.5.3"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/make-dir/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/make-error": {
+ "version": "1.3.6",
+ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
+ "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/makeerror": {
+ "version": "1.0.12",
+ "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz",
+ "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "tmpl": "1.0.5"
+ }
+ },
+ "node_modules/math-intrinsics": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+ "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
+ "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/neo-async": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "deprecated": "Use your platform's native DOMException instead",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "license": "MIT",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/node-int64": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz",
+ "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "path-key": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "mimic-fn": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-limit": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/p-locate/node_modules/p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "p-try": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
+ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pirates": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz",
+ "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "find-up": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pretty-format": {
+ "version": "29.7.0",
+ "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz",
+ "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@jest/schemas": "^29.6.3",
+ "ansi-styles": "^5.0.0",
+ "react-is": "^18.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/pretty-format/node_modules/@jest/schemas": {
+ "version": "29.6.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz",
+ "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@sinclair/typebox": "^0.27.8"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/pretty-format/node_modules/@sinclair/typebox": {
+ "version": "0.27.8",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
+ "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/pretty-format/node_modules/ansi-styles": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
+ "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/prompts": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
+ "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "kleur": "^3.0.3",
+ "sisteransi": "^1.0.5"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/pure-rand": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.1.0.tgz",
+ "integrity": "sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/dubzzz"
+ },
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/fast-check"
+ }
+ ],
+ "license": "MIT"
+ },
+ "node_modules/react-is": {
+ "version": "18.3.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
+ "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/resolve": {
+ "version": "1.22.11",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz",
+ "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-core-module": "^2.16.1",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-cwd": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz",
+ "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "resolve-from": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz",
+ "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/resolve.exports": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.3.tgz",
+ "integrity": "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/sisteransi": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
+ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "dev": true,
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-support": {
+ "version": "0.5.13",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz",
+ "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==",
+ "dev": true,
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/stack-utils": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz",
+ "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "escape-string-regexp": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/string-length": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz",
+ "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "char-regex": "^1.0.2",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz",
+ "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/test-exclude": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
+ "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "@istanbuljs/schema": "^0.1.2",
+ "glob": "^7.1.4",
+ "minimatch": "^3.0.4"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/tmpl": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
+ "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
+ "dev": true,
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
+ "license": "MIT"
+ },
+ "node_modules/ts-jest": {
+ "version": "29.4.5",
+ "resolved": "https://registry.npmjs.org/ts-jest/-/ts-jest-29.4.5.tgz",
+ "integrity": "sha512-HO3GyiWn2qvTQA4kTgjDcXiMwYQt68a1Y8+JuLRVpdIzm+UOLSHgl/XqR4c6nzJkq5rOkjc02O2I7P7l/Yof0Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "bs-logger": "^0.2.6",
+ "fast-json-stable-stringify": "^2.1.0",
+ "handlebars": "^4.7.8",
+ "json5": "^2.2.3",
+ "lodash.memoize": "^4.1.2",
+ "make-error": "^1.3.6",
+ "semver": "^7.7.3",
+ "type-fest": "^4.41.0",
+ "yargs-parser": "^21.1.1"
+ },
+ "bin": {
+ "ts-jest": "cli.js"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0"
+ },
+ "peerDependencies": {
+ "@babel/core": ">=7.0.0-beta.0 <8",
+ "@jest/transform": "^29.0.0 || ^30.0.0",
+ "@jest/types": "^29.0.0 || ^30.0.0",
+ "babel-jest": "^29.0.0 || ^30.0.0",
+ "jest": "^29.0.0 || ^30.0.0",
+ "jest-util": "^29.0.0 || ^30.0.0",
+ "typescript": ">=4.3 <6"
+ },
+ "peerDependenciesMeta": {
+ "@babel/core": {
+ "optional": true
+ },
+ "@jest/transform": {
+ "optional": true
+ },
+ "@jest/types": {
+ "optional": true
+ },
+ "babel-jest": {
+ "optional": true
+ },
+ "esbuild": {
+ "optional": true
+ },
+ "jest-util": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/ts-jest/node_modules/semver": {
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
+ "dev": true,
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ts-jest/node_modules/type-fest": {
+ "version": "4.41.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz",
+ "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==",
+ "dev": true,
+ "license": "(MIT OR CC0-1.0)",
+ "engines": {
+ "node": ">=16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/type-detect": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+ "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "dev": true,
+ "license": "(MIT OR CC0-1.0)",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.9.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
+ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/uglify-js": {
+ "version": "3.19.3",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz",
+ "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==",
+ "dev": true,
+ "license": "BSD-2-Clause",
+ "optional": true,
+ "bin": {
+ "uglifyjs": "bin/uglifyjs"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/undici-types": {
+ "version": "6.21.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
+ "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
+ "license": "MIT"
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.4.tgz",
+ "integrity": "sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.2.0",
+ "picocolors": "^1.1.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/v8-to-istanbul": {
+ "version": "9.3.0",
+ "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz",
+ "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.12",
+ "@types/istanbul-lib-coverage": "^2.0.1",
+ "convert-source-map": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10.12.0"
+ }
+ },
+ "node_modules/walker": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz",
+ "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==",
+ "dev": true,
+ "license": "Apache-2.0",
+ "dependencies": {
+ "makeerror": "1.0.12"
+ }
+ },
+ "node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "license": "MIT",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/wordwrap": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+ "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/write-file-atomic": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz",
+ "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "imurmurhash": "^0.1.4",
+ "signal-exit": "^3.0.7"
+ },
+ "engines": {
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true,
+ "license": "ISC"
+ },
+ "node_modules/yargs": {
+ "version": "17.7.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz",
+ "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "dev": true,
+ "license": "ISC",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ }
+ }
+}
diff --git a/tests/integration/client-typescript/package.json b/tests/integration/client-typescript/package.json
new file mode 100644
index 000000000..e5fe1b8f5
--- /dev/null
+++ b/tests/integration/client-typescript/package.json
@@ -0,0 +1,18 @@
+{
+ "name": "llama-stack-typescript-integration-tests",
+ "version": "0.0.1",
+ "private": true,
+ "description": "TypeScript client integration tests for Llama Stack",
+ "scripts": {
+ "test": "node run-tests.js"
+ },
+ "devDependencies": {
+ "@swc/core": "^1.3.102",
+ "@swc/jest": "^0.2.29",
+ "@types/jest": "^29.4.0",
+ "@types/node": "^20.0.0",
+ "jest": "^29.4.0",
+ "ts-jest": "^29.1.0",
+ "typescript": "^5.0.0"
+ }
+}
diff --git a/tests/integration/client-typescript/run-tests.js b/tests/integration/client-typescript/run-tests.js
new file mode 100755
index 000000000..93df5d8a0
--- /dev/null
+++ b/tests/integration/client-typescript/run-tests.js
@@ -0,0 +1,63 @@
+#!/usr/bin/env node
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Test runner that finds and executes TypeScript tests based on suite/setup mapping.
+ * Called by integration-tests.sh via npm test.
+ */
+
+const fs = require('fs');
+const path = require('path');
+const { execSync } = require('child_process');
+
+const suite = process.env.LLAMA_STACK_TEST_SUITE;
+const setup = process.env.LLAMA_STACK_TEST_SETUP || '';
+
+if (!suite) {
+ console.error('Error: LLAMA_STACK_TEST_SUITE environment variable is required');
+ process.exit(1);
+}
+
+// Read suites.json to find matching test files
+const suitesPath = path.join(__dirname, 'suites.json');
+if (!fs.existsSync(suitesPath)) {
+ console.log(`No TypeScript tests configured (${suitesPath} not found)`);
+ process.exit(0);
+}
+
+const suites = JSON.parse(fs.readFileSync(suitesPath, 'utf-8'));
+
+// Find matching entry
+let testFiles = [];
+for (const entry of suites) {
+ if (entry.suite !== suite) {
+ continue;
+ }
+ const entrySetup = entry.setup || '';
+ if (entrySetup && entrySetup !== setup) {
+ continue;
+ }
+ testFiles = entry.files || [];
+ break;
+}
+
+if (testFiles.length === 0) {
+ console.log(`No TypeScript integration tests mapped for suite ${suite} (setup ${setup})`);
+ process.exit(0);
+}
+
+console.log(`Running TypeScript tests for suite ${suite} (setup ${setup}): ${testFiles.join(', ')}`);
+
+// Run Jest with the mapped test files
+try {
+ execSync(`npx jest --config jest.integration.config.js ${testFiles.join(' ')}`, {
+ stdio: 'inherit',
+ cwd: __dirname,
+ });
+} catch (error) {
+ process.exit(error.status || 1);
+}
diff --git a/tests/integration/client-typescript/setup.ts b/tests/integration/client-typescript/setup.ts
new file mode 100644
index 000000000..75cabab74
--- /dev/null
+++ b/tests/integration/client-typescript/setup.ts
@@ -0,0 +1,162 @@
+// Copyright (c) Meta Platforms, Inc. and affiliates.
+// All rights reserved.
+//
+// This source code is licensed under the terms described in the LICENSE file in
+// the root directory of this source tree.
+
+/**
+ * Global setup for integration tests.
+ * This file mimics pytest's fixture system by providing shared test configuration.
+ */
+
+import LlamaStackClient from 'llama-stack-client';
+
+/**
+ * Load test configuration from the Python setup system.
+ * This reads setup definitions from tests/integration/suites.py via get_setup_env.py.
+ */
+function loadTestConfig() {
+ const baseURL = process.env['TEST_API_BASE_URL'];
+ const setupName = process.env['LLAMA_STACK_TEST_SETUP'];
+ const textModel = process.env['LLAMA_STACK_TEST_TEXT_MODEL'];
+ const embeddingModel = process.env['LLAMA_STACK_TEST_EMBEDDING_MODEL'];
+
+ if (!baseURL) {
+ throw new Error(
+ 'TEST_API_BASE_URL is required for integration tests. ' +
+ 'Run tests using: ./scripts/integration-test.sh',
+ );
+ }
+
+ return {
+ baseURL,
+ textModel,
+ embeddingModel,
+ setupName,
+ };
+}
+
+// Read configuration from environment variables (set by scripts/integration-test.sh)
+export const TEST_CONFIG = loadTestConfig();
+
+// Validate required configuration
+beforeAll(() => {
+ console.log('\n=== Integration Test Configuration ===');
+ console.log(`Base URL: ${TEST_CONFIG.baseURL}`);
+ console.log(`Setup: ${TEST_CONFIG.setupName || 'NOT SET'}`);
+ console.log(
+ `Text Model: ${TEST_CONFIG.textModel || 'NOT SET - tests requiring text model will be skipped'}`,
+ );
+ console.log(
+ `Embedding Model: ${
+ TEST_CONFIG.embeddingModel || 'NOT SET - tests requiring embedding model will be skipped'
+ }`,
+ );
+ console.log('=====================================\n');
+});
+
+/**
+ * Create a client instance for integration tests.
+ * Mimics pytest's `llama_stack_client` fixture.
+ *
+ * @param testId - Test ID to send in X-LlamaStack-Provider-Data header for replay mode.
+ * Format: "tests/integration/responses/test_basic_responses.py::test_name[params]"
+ */
+export function createTestClient(testId?: string): LlamaStackClient {
+ const headers: Record<string, string> = {};
+
+ // In server mode with replay, send test ID for recording isolation
+ if (process.env['LLAMA_STACK_TEST_STACK_CONFIG_TYPE'] === 'server' && testId) {
+ headers['X-LlamaStack-Provider-Data'] = JSON.stringify({
+ __test_id: testId,
+ });
+ }
+
+ return new LlamaStackClient({
+ baseURL: TEST_CONFIG.baseURL,
+ timeout: 60000, // 60 seconds
+ defaultHeaders: headers,
+ });
+}
+
+/**
+ * Skip test if required model is not configured.
+ * Mimics pytest's `skip_if_no_model` autouse fixture.
+ */
+export function skipIfNoModel(modelType: 'text' | 'embedding'): typeof test {
+ const model = modelType === 'text' ? TEST_CONFIG.textModel : TEST_CONFIG.embeddingModel;
+
+ if (!model) {
+ const envVar = modelType === 'text' ? 'LLAMA_STACK_TEST_TEXT_MODEL' : 'LLAMA_STACK_TEST_EMBEDDING_MODEL';
+ const message = `Skipping: ${modelType} model not configured (set ${envVar})`;
+ return test.skip.bind(test) as typeof test;
+ }
+
+ return test;
+}
+
+/**
+ * Get the configured text model, throwing if not set.
+ * Use this in tests that absolutely require a text model.
+ */
+export function requireTextModel(): string {
+ if (!TEST_CONFIG.textModel) {
+ throw new Error(
+ 'LLAMA_STACK_TEST_TEXT_MODEL environment variable is required. ' +
+ 'Run tests using: ./scripts/integration-test.sh',
+ );
+ }
+ return TEST_CONFIG.textModel;
+}
+
+/**
+ * Get the configured embedding model, throwing if not set.
+ * Use this in tests that absolutely require an embedding model.
+ */
+export function requireEmbeddingModel(): string {
+ if (!TEST_CONFIG.embeddingModel) {
+ throw new Error(
+ 'LLAMA_STACK_TEST_EMBEDDING_MODEL environment variable is required. ' +
+ 'Run tests using: ./scripts/integration-test.sh',
+ );
+ }
+ return TEST_CONFIG.embeddingModel;
+}
+
+/**
+ * Extracts aggregated text output from a ResponseObject.
+ * This concatenates all text content from the response's output array.
+ *
+ * Copied from llama-stack-client's response-helpers until it's available in published version.
+ */
+export function getResponseOutputText(response: any): string {
+ const pieces: string[] = [];
+
+ for (const output of response.output ?? []) {
+ if (!output || output.type !== 'message') {
+ continue;
+ }
+
+ const content = output.content;
+ if (typeof content === 'string') {
+ pieces.push(content);
+ continue;
+ }
+
+ if (!Array.isArray(content)) {
+ continue;
+ }
+
+ for (const item of content) {
+ if (typeof item === 'string') {
+ pieces.push(item);
+ continue;
+ }
+ if (item && item.type === 'output_text' && 'text' in item && typeof item.text === 'string') {
+ pieces.push(item.text);
+ }
+ }
+ }
+
+ return pieces.join('');
+}
diff --git a/tests/integration/client-typescript/suites.json b/tests/integration/client-typescript/suites.json
new file mode 100644
index 000000000..5c5b83058
--- /dev/null
+++ b/tests/integration/client-typescript/suites.json
@@ -0,0 +1,12 @@
+[
+ {
+ "suite": "responses",
+ "setup": "gpt",
+ "files": ["__tests__/responses.test.ts"]
+ },
+ {
+ "suite": "base",
+ "setup": "ollama",
+ "files": ["__tests__/inference.test.ts"]
+ }
+]
diff --git a/tests/integration/client-typescript/tsconfig.json b/tests/integration/client-typescript/tsconfig.json
new file mode 100644
index 000000000..19b6cdeb1
--- /dev/null
+++ b/tests/integration/client-typescript/tsconfig.json
@@ -0,0 +1,16 @@
+{
+ "compilerOptions": {
+ "target": "ES2022",
+ "module": "ES2022",
+ "lib": ["ES2022"],
+ "moduleResolution": "bundler",
+ "esModuleInterop": true,
+ "allowSyntheticDefaultImports": true,
+ "strict": true,
+ "skipLibCheck": true,
+ "resolveJsonModule": true,
+ "types": ["jest", "node"]
+ },
+ "include": ["**/*.ts"],
+ "exclude": ["node_modules"]
+}
diff --git a/tests/integration/files/test_files.py b/tests/integration/files/test_files.py
index d9e8dd501..e8004c95d 100644
--- a/tests/integration/files/test_files.py
+++ b/tests/integration/files/test_files.py
@@ -10,8 +10,8 @@ from unittest.mock import patch
import pytest
import requests
-from llama_stack.apis.files import OpenAIFilePurpose
from llama_stack.core.datatypes import User
+from llama_stack_api import OpenAIFilePurpose
purpose = OpenAIFilePurpose.ASSISTANTS
@@ -175,7 +175,7 @@ def test_expires_after_requests(openai_client):
@pytest.mark.xfail(message="User isolation broken for current providers, must be fixed.")
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
def test_files_authentication_isolation(mock_get_authenticated_user, llama_stack_client):
"""Test that users can only access their own files."""
from llama_stack_client import NotFoundError
@@ -275,7 +275,7 @@ def test_files_authentication_isolation(mock_get_authenticated_user, llama_stack
raise e
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
def test_files_authentication_shared_attributes(
mock_get_authenticated_user, llama_stack_client, provider_type_is_openai
):
@@ -335,7 +335,7 @@ def test_files_authentication_shared_attributes(
raise e
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
def test_files_authentication_anonymous_access(
mock_get_authenticated_user, llama_stack_client, provider_type_is_openai
):
diff --git a/tests/integration/fixtures/common.py b/tests/integration/fixtures/common.py
index d5e4c15f7..817180cfe 100644
--- a/tests/integration/fixtures/common.py
+++ b/tests/integration/fixtures/common.py
@@ -233,10 +233,21 @@ def instantiate_llama_stack_client(session):
raise ValueError("You must specify either --stack-config or LLAMA_STACK_CONFIG")
# Handle server: format or server::
+ # Also handles server::: format
if config.startswith("server:"):
- parts = config.split(":")
- config_name = parts[1]
- port = int(parts[2]) if len(parts) > 2 else int(os.environ.get("LLAMA_STACK_PORT", DEFAULT_PORT))
+ # Strip the "server:" prefix first
+ config_part = config[7:] # len("server:") == 7
+
+ # Check for :: (distro::runfile format)
+ if "::" in config_part:
+ config_name = config_part
+ port = int(os.environ.get("LLAMA_STACK_PORT", DEFAULT_PORT))
+ else:
+ # Single colon format: either or :
+ parts = config_part.split(":")
+ config_name = parts[0]
+ port = int(parts[1]) if len(parts) > 1 else int(os.environ.get("LLAMA_STACK_PORT", DEFAULT_PORT))
+
base_url = f"http://localhost:{port}"
force_restart = os.environ.get("LLAMA_STACK_TEST_FORCE_SERVER_RESTART") == "1"
@@ -323,7 +334,13 @@ def require_server(llama_stack_client):
@pytest.fixture(scope="session")
def openai_client(llama_stack_client, require_server):
base_url = f"{llama_stack_client.base_url}/v1"
- return OpenAI(base_url=base_url, api_key="fake")
+ client = OpenAI(base_url=base_url, api_key="fake", max_retries=0, timeout=30.0)
+ yield client
+ # Cleanup: close HTTP connections
+ try:
+ client.close()
+ except Exception:
+ pass
@pytest.fixture(params=["openai_client", "client_with_models"])
diff --git a/tests/integration/inference/test_openai_completion.py b/tests/integration/inference/test_openai_completion.py
index 1568ffbe2..4ce2850b4 100644
--- a/tests/integration/inference/test_openai_completion.py
+++ b/tests/integration/inference/test_openai_completion.py
@@ -54,6 +54,7 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
# {"error":{"message":"Unknown request URL: GET /openai/v1/completions. Please check the URL for typos,
# or see the docs at https://console.groq.com/docs/","type":"invalid_request_error","code":"unknown_url"}}
"remote::groq",
+ "remote::oci",
"remote::gemini", # https://generativelanguage.googleapis.com/v1beta/openai/completions -> 404
"remote::anthropic", # at least claude-3-{5,7}-{haiku,sonnet}-* / claude-{sonnet,opus}-4-* are not supported
"remote::azure", # {'error': {'code': 'OperationNotSupported', 'message': 'The completion operation
diff --git a/tests/integration/inference/test_openai_embeddings.py b/tests/integration/inference/test_openai_embeddings.py
index 704775716..fe8070162 100644
--- a/tests/integration/inference/test_openai_embeddings.py
+++ b/tests/integration/inference/test_openai_embeddings.py
@@ -138,6 +138,7 @@ def skip_if_model_doesnt_support_openai_embeddings(client, model_id):
"remote::runpod",
"remote::sambanova",
"remote::tgi",
+ "remote::oci",
):
pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support OpenAI embeddings.")
diff --git a/tests/integration/inference/test_provider_data_routing.py b/tests/integration/inference/test_provider_data_routing.py
index 99aa75395..e4a0a24b5 100644
--- a/tests/integration/inference/test_provider_data_routing.py
+++ b/tests/integration/inference/test_provider_data_routing.py
@@ -16,15 +16,15 @@ from unittest.mock import AsyncMock, patch
import pytest
-from llama_stack.apis.datatypes import Api
-from llama_stack.apis.inference.inference import (
+from llama_stack.core.library_client import LlamaStackAsLibraryClient
+from llama_stack.core.telemetry.telemetry import MetricEvent
+from llama_stack_api import (
+ Api,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionUsage,
OpenAIChoice,
)
-from llama_stack.core.library_client import LlamaStackAsLibraryClient
-from llama_stack.core.telemetry.telemetry import MetricEvent
class OpenAIChatCompletionWithMetrics(OpenAIChatCompletion):
diff --git a/tests/integration/inference/test_tools_with_schemas.py b/tests/integration/inference/test_tools_with_schemas.py
index f30e9ece5..ab033c381 100644
--- a/tests/integration/inference/test_tools_with_schemas.py
+++ b/tests/integration/inference/test_tools_with_schemas.py
@@ -9,8 +9,6 @@ Integration tests for inference/chat completion with JSON Schema-based tools.
Tests that tools pass through correctly to various LLM providers.
"""
-import json
-
import pytest
from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -193,15 +191,11 @@ class TestMCPToolsInChatCompletion:
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
# Get the tools from MCP
tools_response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Convert to OpenAI format for inference
diff --git a/tests/integration/post_training/test_post_training.py b/tests/integration/post_training/test_post_training.py
index b5be71c7c..e6868019a 100644
--- a/tests/integration/post_training/test_post_training.py
+++ b/tests/integration/post_training/test_post_training.py
@@ -10,7 +10,8 @@ import uuid
import pytest
-from llama_stack.apis.post_training import (
+from llama_stack.log import get_logger
+from llama_stack_api import (
DataConfig,
DatasetFormat,
DPOAlignmentConfig,
@@ -18,7 +19,6 @@ from llama_stack.apis.post_training import (
LoraFinetuningConfig,
TrainingConfig,
)
-from llama_stack.log import get_logger
# Configure logging
logger = get_logger(name=__name__, category="post_training")
diff --git a/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py b/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py
index ad9115756..4f4f4a8dd 100644
--- a/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py
+++ b/tests/integration/providers/utils/sqlstore/test_authorized_sqlstore.py
@@ -13,14 +13,14 @@ import pytest
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.datatypes import User
from llama_stack.core.storage.datatypes import SqlStoreReference
-from llama_stack.providers.utils.sqlstore.api import ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from llama_stack.providers.utils.sqlstore.sqlstore import (
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore
+from llama_stack.core.storage.sqlstore.sqlstore import (
PostgresSqlStoreConfig,
SqliteSqlStoreConfig,
register_sqlstore_backends,
sqlstore_impl,
)
+from llama_stack_api.internal.sqlstore import ColumnType
def get_postgres_config():
@@ -96,7 +96,7 @@ async def cleanup_records(sql_store, table_name, record_ids):
@pytest.mark.parametrize("backend_config", BACKEND_CONFIGS)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_store_attributes(mock_get_authenticated_user, authorized_store, request):
"""Test that JSON column comparisons work correctly for both PostgreSQL and SQLite"""
backend_name = request.node.callspec.id
@@ -190,7 +190,7 @@ async def test_authorized_store_attributes(mock_get_authenticated_user, authoriz
@pytest.mark.parametrize("backend_config", BACKEND_CONFIGS)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_user_ownership_policy(mock_get_authenticated_user, authorized_store, request):
"""Test that 'user is owner' policies work correctly with record ownership"""
from llama_stack.core.access_control.datatypes import AccessRule, Action, Scope
diff --git a/tests/integration/recordings/README.md b/tests/integration/recordings/README.md
index 621a07562..bdf4f532f 100644
--- a/tests/integration/recordings/README.md
+++ b/tests/integration/recordings/README.md
@@ -2,6 +2,10 @@
This directory contains recorded inference API responses used for deterministic testing without requiring live API access.
+For more information, see the
+[docs](https://llamastack.github.io/docs/contributing/testing/record-replay).
+This README provides more technical information.
+
## Structure
- `responses/` - JSON files containing request/response pairs for inference operations
diff --git a/tests/integration/responses/conftest.py b/tests/integration/responses/conftest.py
new file mode 100644
index 000000000..c29575072
--- /dev/null
+++ b/tests/integration/responses/conftest.py
@@ -0,0 +1,17 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import pytest
+
+from llama_stack.core.library_client import LlamaStackAsLibraryClient
+
+
+@pytest.fixture
+def responses_client(compat_client):
+ """Provide a client for responses tests, skipping library client mode."""
+ if isinstance(compat_client, LlamaStackAsLibraryClient):
+ pytest.skip("Responses API tests are not supported in library client mode")
+ return compat_client
diff --git a/tests/integration/responses/fixtures/fixtures.py b/tests/integration/responses/fixtures/fixtures.py
index dbf67e138..b06117b98 100644
--- a/tests/integration/responses/fixtures/fixtures.py
+++ b/tests/integration/responses/fixtures/fixtures.py
@@ -115,7 +115,15 @@ def openai_client(base_url, api_key, provider):
client = LlamaStackAsLibraryClient(config, skip_logger_removal=True)
return client
- return OpenAI(
+ client = OpenAI(
base_url=base_url,
api_key=api_key,
+ max_retries=0,
+ timeout=30.0,
)
+ yield client
+ # Cleanup: close HTTP connections
+ try:
+ client.close()
+ except Exception:
+ pass
diff --git a/tests/integration/responses/recordings/0a4aca0cd075369aaf6133ee82d9d940455cb083c0fd1330c666a12d74df6f89.json b/tests/integration/responses/recordings/0a4aca0cd075369aaf6133ee82d9d940455cb083c0fd1330c666a12d74df6f89.json
new file mode 100644
index 000000000..9b432130b
--- /dev/null
+++ b/tests/integration/responses/recordings/0a4aca0cd075369aaf6133ee82d9d940455cb083c0fd1330c666a12d74df6f89.json
@@ -0,0 +1,549 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-experiment_analysis_streaming]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need a complete analysis: First, get the experiment ID for 'chemical_reaction', then get the results for that experiment, and tell me if the yield was above 80%. Return only one tool call per step. Please stream your analysis process."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_Q9Gcxub7UbQsxJWVkiy4FETr",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\":\"chemical_reaction\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_Q9Gcxub7UbQsxJWVkiy4FETr",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_003"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_yTMuQEKu7x115q8XvhqelRub",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_results"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "9CSOZwfG5M7nid"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Wss"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "experiment",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5AmVsa0S6NBy"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_id",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2Sf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "exp",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "leu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "omxpR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "003",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kW6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Zm6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "aXvC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-0a4aca0cd075",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 19,
+ "prompt_tokens": 457,
+ "total_tokens": 476,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "s13YHOCCaCDcJ"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json b/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json
new file mode 100644
index 000000000..4418331b0
--- /dev/null
+++ b/tests/integration/responses/recordings/1997dc007d202497ce456683d24ddde3553f0db5d5a673146d8bb99c072e77cd.json
@@ -0,0 +1,773 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1V9w3bXnppL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YEsj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"ex",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "perim",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Q"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ent_na",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "me\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "U"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": " \"boi",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ling_p",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "oint",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ha"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "d5D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "function": {
+ "arguments": "",
+ "name": "get_user_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0LbsjDcKz6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"us",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "c"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ernam",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "e\": \"c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "7C0WFn181I3y3l"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "harl",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "wf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ie\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "r"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "FAci"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-1997dc007d20",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 51,
+ "prompt_tokens": 393,
+ "total_tokens": 444,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "6xgpRRdKjviPT"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/2bd4c8dc08b3ee3ffce696864f0bd9f35d82223c7d1cab613ab2e818d79d6f9b.json b/tests/integration/responses/recordings/2bd4c8dc08b3ee3ffce696864f0bd9f35d82223c7d1cab613ab2e818d79d6f9b.json
new file mode 100644
index 000000000..5aebcd841
--- /dev/null
+++ b/tests/integration/responses/recordings/2bd4c8dc08b3ee3ffce696864f0bd9f35d82223c7d1cab613ab2e818d79d6f9b.json
@@ -0,0 +1,295 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_file_access_check]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Do this as a series of steps, where each step is a separate message. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_EsVvmBUqtJb42kNkYnK19QkJ",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\":\"alice\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_EsVvmBUqtJb42kNkYnK19QkJ",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_12345"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_kCmSE8ORKfQoiEsW2UCYr5Sh",
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "arguments": "{\"user_id\":\"user_12345\",\"filename\":\"document.txt\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_kCmSE8ORKfQoiEsW2UCYr5Sh",
+ "content": [
+ {
+ "type": "text",
+ "text": "yes"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2bd4c8dc08b3",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "UxHf8fChwO3CUY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2bd4c8dc08b3",
+ "choices": [
+ {
+ "delta": {
+ "content": "yes",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "GOexNEhopELIg"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2bd4c8dc08b3",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "O41d8hC8zD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2bd4c8dc08b3",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 2,
+ "prompt_tokens": 516,
+ "total_tokens": 518,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "9VQklZAZMYAfa0"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/2ed23a4289840f93202f94e7e7027118869d34d768ad87ba072e92e8a43a52f2.json b/tests/integration/responses/recordings/2ed23a4289840f93202f94e7e7027118869d34d768ad87ba072e92e8a43a52f2.json
new file mode 100644
index 000000000..c39483a7c
--- /dev/null
+++ b/tests/integration/responses/recordings/2ed23a4289840f93202f94e7e7027118869d34d768ad87ba072e92e8a43a52f2.json
@@ -0,0 +1,833 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_permissions_workflow]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Help me with this security check: First, get the user ID for 'charlie', then get the permissions for that user ID, and finally check if that user can access 'secret_file.txt'. Stream your progress as you work through each step. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\":\"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_moRBxqnBJ48EWTSEoQ1llgib",
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "arguments": "{\"user_id\":\"user_11111\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_moRBxqnBJ48EWTSEoQ1llgib",
+ "content": [
+ {
+ "type": "text",
+ "text": "admin"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_ybUqAP9oQn3rwQqVdOLs5Wb4",
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "arguments": "{\"user_id\":\"user_11111\",\"filename\":\"secret_file.txt\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_ybUqAP9oQn3rwQqVdOLs5Wb4",
+ "content": [
+ {
+ "type": "text",
+ "text": "no"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "WLGSIGDbuImIc2"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "tOPrT8GpCzqCn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ViOvVDT7owF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "EkiYJGYtRb2KCr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ioC2G58DuWTx"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "A5rxByl55APwi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kmDNWRqOyy2r3ST"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " cannot",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "JHGD4XKFC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " access",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "6IPkFhs93"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "LGHjKnVq2lF1DS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "secret",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1nGoXVjnK0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "_file",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "OeR7YlvZQLa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": ".txt",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "yLKHaSgjE64R"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": "'.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "waZY1Js7DPWtoN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "km3Gr5HspErW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " final",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Mvzf8AUstX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " result",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "660CrCPne"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lq7NyKvIo8UEO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": ":",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "qjIz07y1RQsKqTo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": " no",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "xhcVwxM4RaQcN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "dPxBJZ3WUesIy8T"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Z9wFfcEaK2"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-2ed23a428984",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 21,
+ "prompt_tokens": 542,
+ "total_tokens": 563,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "fSoZk1lrb3nJt"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/3177a984c900c2bdc2785b502bded6791b1054ce0f36e967eb3793b5608344f3.json b/tests/integration/responses/recordings/3177a984c900c2bdc2785b502bded6791b1054ce0f36e967eb3793b5608344f3.json
new file mode 100644
index 000000000..d86ca8cc9
--- /dev/null
+++ b/tests/integration/responses/recordings/3177a984c900c2bdc2785b502bded6791b1054ce0f36e967eb3793b5608344f3.json
@@ -0,0 +1,759 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_mcp_tool_approval[openai_client-txt=openai/gpt-4o-True-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_bL84OWNnE1s75GJEqGLAK35W",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ptE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "UEV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "hMko"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "nr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "x"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "aLLC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "EZdr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "yV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "0bj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5J"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\",\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "7dZEY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "elsius",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "AqP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "true",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "X8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "oa7h2"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1Is8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-3177a984c900",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 27,
+ "prompt_tokens": 156,
+ "total_tokens": 183,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "DfwHMdbjUVww7"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/318c5361647df0245c074cd2c7d6f50e862aeddbbeaeb256ef1add34de7c1dc8.json b/tests/integration/responses/recordings/318c5361647df0245c074cd2c7d6f50e862aeddbbeaeb256ef1add34de7c1dc8.json
new file mode 100644
index 000000000..025246ebe
--- /dev/null
+++ b/tests/integration/responses/recordings/318c5361647df0245c074cd2c7d6f50e862aeddbbeaeb256ef1add34de7c1dc8.json
@@ -0,0 +1,549 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-experiment_results_lookup]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need to get the results for the 'boiling_point' experiment. First, get the experiment ID for 'boiling_point', then use that ID to get the experiment results. Tell me the boiling point in Celsius."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_dZwjBxH3aTRhnaS0bJVPqRcz",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\":\"boiling_point\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_dZwjBxH3aTRhnaS0bJVPqRcz",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_skNUKbERbtdoADH834U9OE91",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_results"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5aHvu2xes6Amy8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "9HQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "experiment",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ckAh5OXg9JIe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_id",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "avh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "x"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "exp",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "f75"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Nini1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "004",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "MXB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Vc4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "rnph"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-318c5361647d",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 19,
+ "prompt_tokens": 450,
+ "total_tokens": 469,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "nUptVmnQlQZrH"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/42c357284497af596ae6c9341b0c189daa31e88b25d0381a985f24203b7a5a38.json b/tests/integration/responses/recordings/42c357284497af596ae6c9341b0c189daa31e88b25d0381a985f24203b7a5a38.json
index 7ec2ac931..4e80e1cdd 100644
--- a/tests/integration/responses/recordings/42c357284497af596ae6c9341b0c189daa31e88b25d0381a985f24203b7a5a38.json
+++ b/tests/integration/responses/recordings/42c357284497af596ae6c9341b0c189daa31e88b25d0381a985f24203b7a5a38.json
@@ -10,7 +10,7 @@
},
"response": {
"body": {
- "__type__": "llama_stack.apis.tools.tools.ToolInvocationResult",
+ "__type__": "llama_stack_api.tools.ToolInvocationResult",
"__data__": {
"content": "{\"query\": \"Llama 4 Maverick model experts\", \"top_k\": [{\"url\": \"https://console.groq.com/docs/model/meta-llama/llama-4-maverick-17b-128e-instruct\", \"title\": \"Llama 4 Maverick 17B 128E\", \"content\": \"Llama 4 Maverick is Meta's natively multimodal model that enables text and image understanding. With a 17 billion parameter mixture-of-experts architecture (128 experts), this model offers industry-leading performance for multimodal tasks like natural assistant-like chat, image recognition, and coding tasks. Llama 4 Maverick features an auto-regressive language model that uses a mixture-of-experts (MoE) architecture with 17B activated parameters (400B total) and incorporates early fusion for native multimodality. The model uses 128 experts to efficiently handle both text and image inputs while maintaining high performance across chat, knowledge, and code generation tasks, with a knowledge cutoff of August 2024. * For multimodal applications, this model supports up to 5 image inputs create( model =\\\"meta-llama/llama-4-maverick-17b-128e-instruct\\\", messages =[ { \\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"Explain why fast inference is critical for reasoning models\\\" } ] ) print(completion.\", \"score\": 0.9170729, \"raw_content\": null}, {\"url\": \"https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E\", \"title\": \"meta-llama/Llama-4-Maverick-17B-128E - Hugging Face\", \"content\": \"Model Architecture: The Llama 4 models are auto-regressive language models that use a mixture-of-experts (MoE) architecture and incorporate\", \"score\": 0.8021998, \"raw_content\": null}, {\"url\": \"https://www.ibm.com/new/announcements/meta-llama-4-maverick-and-llama-4-scout-now-available-in-watsonx-ai\", \"title\": \"Meta Llama 4 Maverick and Llama 4 Scout now available in watsonx ...\", \"content\": \"# Meta Llama 4 Maverick and Llama 4 Scout now available in watsonx.ai **IBM is excited to announce the addition of Meta\\u2019s latest 
generation of open models, Llama 4, to** **watsonx.ai****.** Llama 4 Scout and Llama 4 Maverick, the first mixture of experts (MoE) models released by Meta, provide frontier multimodal performance, high speeds, low cost, and industry leading context length. With the introduction of these latest offerings from Meta, IBM now supports a total of 13 Meta models in the expansive library of \\u00a0foundation models available in watsonx.ai. Trained on 40 trillion tokens of data, Llama 4 Scout offers performance rivalling or exceeding that of models with significantly larger active parameter counts while keeping costs and latency low. ## Llama 4 models on IBM watsonx\", \"score\": 0.78194773, \"raw_content\": null}, {\"url\": \"https://medium.com/@divyanshbhatiajm19/metas-llama-4-family-the-complete-guide-to-scout-maverick-and-behemoth-ai-models-in-2025-21a90c882e8a\", \"title\": \"Meta's Llama 4 Family: The Complete Guide to Scout, Maverick, and ...\", \"content\": \"# Meta\\u2019s Llama 4 Family: The Complete Guide to Scout, Maverick, and Behemoth AI Models in 2025 Feature Llama 4 Scout Llama 4 Maverick Llama 4 Behemoth **Total Parameters** 109B 400B ~2T **Active Parameters** 17B 17B 288B **Expert Count** 16 128 16 **Context Window** 10M tokens 1M tokens Not specified **Hardware Requirements** Single H100 GPU Single H100 DGX host Multiple GPUs **Inference Cost** Not specified $0.19-$0.49 per 1M tokens Not specified **Release Status** Available now Available now In training **Primary Use Cases** Long-context analysis, code processing High-performance multimodal applications Research, STEM reasoning The Llama 4 family represents Meta\\u2019s most significant AI development to date, with each model offering distinct advantages for different use cases:\", \"score\": 0.69672287, \"raw_content\": null}, {\"url\": \"https://www.llama.com/models/llama-4/\", \"title\": \"Unmatched Performance and Efficiency | Llama 4\", \"content\": \"# Llama 4 # Llama 4 Llama 4 Scout 
Class-leading natively multimodal model that offers superior text and visual intelligence, single H100 GPU efficiency, and a 10M context window for seamless long document analysis. Llama 4 MaverickIndustry-leading natively multimodal model for image and text understanding with groundbreaking intelligence and fast responses at a low cost. We evaluated model performance on a suite of common benchmarks across a wide range of languages, testing for coding, reasoning, knowledge, vision understanding, multilinguality, and long context. 4. Specialized long context evals are not traditionally reported for generalist models, so we share internal runs to showcase llama's frontier performance. 4. Specialized long context evals are not traditionally reported for generalist models, so we share internal runs to showcase llama's frontier performance.\", \"score\": 0.629889, \"raw_content\": null}]}",
"error_message": null,
diff --git a/tests/integration/responses/recordings/430a49246c97c29bd958f383627f53ec795fd77ef818827e16691689151bf17c.json b/tests/integration/responses/recordings/430a49246c97c29bd958f383627f53ec795fd77ef818827e16691689151bf17c.json
new file mode 100644
index 000000000..b26cd985e
--- /dev/null
+++ b/tests/integration/responses/recordings/430a49246c97c29bd958f383627f53ec795fd77ef818827e16691689151bf17c.json
@@ -0,0 +1,413 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_file_access_check]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Do this as a series of steps, where each step is a separate message. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_EsVvmBUqtJb42kNkYnK19QkJ",
+ "function": {
+ "arguments": "",
+ "name": "get_user_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Ma7aiZxSs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "DXu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "username",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "rtfrl7gxu80vmN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "r"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "alice",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "M"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "vSu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "sXfh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-430a49246c97",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 15,
+ "prompt_tokens": 454,
+ "total_tokens": 469,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "bEe7hWJ6U62YQ"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json b/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json
new file mode 100644
index 000000000..3bec72d95
--- /dev/null
+++ b/tests/integration/responses/recordings/463ab0e2f2914026cfa3c742259c43af318468eb4ef84fd4008ebb40824b7e86.json
@@ -0,0 +1,593 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_function_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Can you tell me the weather in Paris and the current time?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather information for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_time",
+ "description": "Get current time for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "QmTXstGvpa8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_HJMoLtHXfCzhlMQOfqIKt0n3",
+ "function": {
+ "arguments": "",
+ "name": "get_weather"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "iFjmkK23KL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "L"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "THa6gWbrWhVmZ6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "eL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "jng"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_vGKvTKZM7aALMaUw3Jas7lRg",
+ "function": {
+ "arguments": "",
+ "name": "get_time"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "LSailgMcgSl54"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "0engr6vRvqXTEP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "Pe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "LU9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "kD7d"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-463ab0e2f291",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": {
+ "completion_tokens": 44,
+ "prompt_tokens": 110,
+ "total_tokens": 154,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "R4ICoxqTqj7ZY"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/51e3ddbc9d23c614ead9a8fd6ad30294237eb43063c00efc83b8a1202c1cc20c.json b/tests/integration/responses/recordings/51e3ddbc9d23c614ead9a8fd6ad30294237eb43063c00efc83b8a1202c1cc20c.json
new file mode 100644
index 000000000..464de788f
--- /dev/null
+++ b/tests/integration/responses/recordings/51e3ddbc9d23c614ead9a8fd6ad30294237eb43063c00efc83b8a1202c1cc20c.json
@@ -0,0 +1,614 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_backward_compatibility[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_UeAsx9M8mAXo1F1LZj6TsEV9",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_UeAsx9M8mAXo1F1LZj6TsEV9",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "c5g42LQpiBwmVH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "MEmQFjCKEsNDL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "dF3UemYO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ENDOmjG37D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "6kb5u2d4ILV59"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Y6Dp6rbT9OdBG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "EN0ShAkdxF2jIs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1NHavCOT2fSI63"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "VTwbnRFtKY2W"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "VJuNhLeGK43e6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "bFgxcYCjU42I"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5KR4mGTP0Rpu0O"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "KCeY3i4Qo9L1j"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "GgtT2kqCUk8jGH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "H3E18AkuuATh3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5kuUoomGw6aPf0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "CKIiDxWMV3zzcNj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "9KZoS4rawE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-51e3ddbc9d23",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 17,
+ "prompt_tokens": 188,
+ "total_tokens": 205,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "iq2ecCxqopvPO"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/5236eb1d546e5a1bd0712891d8b4866a73cc04ce93db40346beb070f30fafee1.json b/tests/integration/responses/recordings/5236eb1d546e5a1bd0712891d8b4866a73cc04ce93db40346beb070f30fafee1.json
new file mode 100644
index 000000000..66c87e3bb
--- /dev/null
+++ b/tests/integration/responses/recordings/5236eb1d546e5a1bd0712891d8b4866a73cc04ce93db40346beb070f30fafee1.json
@@ -0,0 +1,614 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_bearer[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_mitVYvmPaFfoSmKjzKo5xmZp",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_mitVYvmPaFfoSmKjzKo5xmZp",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "veiGKPHTdRNcOX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "u9RK8eZYDguJs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "U0L1RjHF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "TMS6QVLJfj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5zokjwZ0nBNlD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "CmOp3DQRu0AqZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "OlnZU0jlGyE2mD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "PGCsCfw8zUqRAj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8P65fJ4x3QVF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HVTNGb62o54Ol"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "bdRgQioKQZM6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5djjyePEzwsPID"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "xoN3TaCEum6A9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "UmU8LCL6WJIDrf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "FFXxvyme7JKyc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8BpDPmgFmIBJQQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Mey7rwshfBQbVlP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "IXaz4vn8As"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-5236eb1d546e",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 17,
+ "prompt_tokens": 188,
+ "total_tokens": 205,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "9ebnd6bFXcdOY"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/52a2b96781961e252aa3a7b0a5ff77eb5d0989d312e929ed59dda07738487d09.json b/tests/integration/responses/recordings/52a2b96781961e252aa3a7b0a5ff77eb5d0989d312e929ed59dda07738487d09.json
new file mode 100644
index 000000000..fef5f0a62
--- /dev/null
+++ b/tests/integration/responses/recordings/52a2b96781961e252aa3a7b0a5ff77eb5d0989d312e929ed59dda07738487d09.json
@@ -0,0 +1,586 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_permissions_workflow]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Help me with this security check: First, get the user ID for 'charlie', then get the permissions for that user ID, and finally check if that user can access 'secret_file.txt'. Stream your progress as you work through each step. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\":\"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_moRBxqnBJ48EWTSEoQ1llgib",
+ "function": {
+ "arguments": "",
+ "name": "get_user_permissions"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "00p"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "user",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Y0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_id",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "i2I"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "P"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "user",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "IG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QY61l"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "111",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "YAZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "11",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Nw7U"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Ev7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "CSaD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-52a2b9678196",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 19,
+ "prompt_tokens": 478,
+ "total_tokens": 497,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "kMNEyeKFT75vK"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/541b5db7789e61d2400b70bd41c2ff7145784d249c3216c34299c38c28118328.json b/tests/integration/responses/recordings/541b5db7789e61d2400b70bd41c2ff7145784d249c3216c34299c38c28118328.json
new file mode 100644
index 000000000..6b7e5bc49
--- /dev/null
+++ b/tests/integration/responses/recordings/541b5db7789e61d2400b70bd41c2ff7145784d249c3216c34299c38c28118328.json
@@ -0,0 +1,524 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-experiment_results_lookup]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need to get the results for the 'boiling_point' experiment. First, get the experiment ID for 'boiling_point', then use that ID to get the experiment results. Tell me the boiling point in Celsius."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_dZwjBxH3aTRhnaS0bJVPqRcz",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "W3B"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "L7n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "experiment",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lXUc0FKJkRea"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "bo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "3dUQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "iling",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_point",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "48i"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "eQyU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-541b5db7789e",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 19,
+ "prompt_tokens": 418,
+ "total_tokens": 437,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "5tVrc5IEigum8"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/54aa690e31b5c33a0488a5d7403393e5712917253462292829b37b9320d6df82.json b/tests/integration/responses/recordings/54aa690e31b5c33a0488a5d7403393e5712917253462292829b37b9320d6df82.json
index a6c31dc72..a8e1e8611 100644
--- a/tests/integration/responses/recordings/54aa690e31b5c33a0488a5d7403393e5712917253462292829b37b9320d6df82.json
+++ b/tests/integration/responses/recordings/54aa690e31b5c33a0488a5d7403393e5712917253462292829b37b9320d6df82.json
@@ -10,7 +10,7 @@
},
"response": {
"body": {
- "__type__": "llama_stack.apis.tools.tools.ToolInvocationResult",
+ "__type__": "llama_stack_api.tools.ToolInvocationResult",
"__data__": {
"content": "{\"query\": \"Llama 4 Maverick model number of experts\", \"top_k\": [{\"url\": \"https://console.groq.com/docs/model/meta-llama/llama-4-maverick-17b-128e-instruct\", \"title\": \"Llama 4 Maverick 17B 128E\", \"content\": \"Llama 4 Maverick is Meta's natively multimodal model that enables text and image understanding. With a 17 billion parameter mixture-of-experts architecture (128 experts), this model offers industry-leading performance for multimodal tasks like natural assistant-like chat, image recognition, and coding tasks. Llama 4 Maverick features an auto-regressive language model that uses a mixture-of-experts (MoE) architecture with 17B activated parameters (400B total) and incorporates early fusion for native multimodality. The model uses 128 experts to efficiently handle both text and image inputs while maintaining high performance across chat, knowledge, and code generation tasks, with a knowledge cutoff of August 2024. * For multimodal applications, this model supports up to 5 image inputs create( model =\\\"meta-llama/llama-4-maverick-17b-128e-instruct\\\", messages =[ { \\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"Explain why fast inference is critical for reasoning models\\\" } ] ) print(completion.\", \"score\": 0.9287263, \"raw_content\": null}, {\"url\": \"https://huggingface.co/meta-llama/Llama-4-Maverick-17B-128E\", \"title\": \"meta-llama/Llama-4-Maverick-17B-128E\", \"content\": \"... model with 16 experts, and Llama 4 Maverick, a 17 billion parameter model with 128 experts. Model developer: Meta. Model Architecture: The\", \"score\": 0.9183121, \"raw_content\": null}, {\"url\": \"https://build.nvidia.com/meta/llama-4-maverick-17b-128e-instruct/modelcard\", \"title\": \"llama-4-maverick-17b-128e-instruct Model by Meta\", \"content\": \"... model with 16 experts, and Llama 4 Maverick, a 17 billion parameter model with 128 experts. Third-Party Community Consideration. 
This model\", \"score\": 0.91399205, \"raw_content\": null}, {\"url\": \"https://replicate.com/meta/llama-4-maverick-instruct\", \"title\": \"meta/llama-4-maverick-instruct | Run with an API on ...\", \"content\": \"... model with 16 experts, and Llama 4 Maverick, a 17 billion parameter model with 128 experts. All services are online \\u00b7 Home \\u00b7 About \\u00b7 Changelog\", \"score\": 0.9073207, \"raw_content\": null}, {\"url\": \"https://openrouter.ai/meta-llama/llama-4-maverick\", \"title\": \"Llama 4 Maverick - API, Providers, Stats\", \"content\": \"# Meta: Llama 4 Maverick ### meta-llama/llama-4-maverick Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward pass (400B total). Released on April 5, 2025 under the Llama 4 Community License, Maverick is suited for research and commercial applications requiring advanced multimodal understanding and high model throughput. Llama 4 Maverick - API, Providers, Stats | OpenRouter ## Providers for Llama 4 Maverick ## Performance for Llama 4 Maverick ## Apps using Llama 4 Maverick ## Recent activity on Llama 4 Maverick ## Uptime stats for Llama 4 Maverick ## Sample code and API for Llama 4 Maverick\", \"score\": 0.8958969, \"raw_content\": null}]}",
"error_message": null,
diff --git a/tests/integration/responses/recordings/56ddb450d81590f461113ec5a55d0532e8f5b9418b22e5f874afff695601da16.json b/tests/integration/responses/recordings/56ddb450d81590f461113ec5a55d0532e8f5b9418b22e5f874afff695601da16.json
new file mode 100644
index 000000000..bacefe818
--- /dev/null
+++ b/tests/integration/responses/recordings/56ddb450d81590f461113ec5a55d0532e8f5b9418b22e5f874afff695601da16.json
@@ -0,0 +1,574 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_backward_compatibility[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_UeAsx9M8mAXo1F1LZj6TsEV9",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "bKe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kxw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "cKkF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "md"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "O"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "o"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "nRfv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1M8i"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "7q"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "R2Q"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "MDi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "7KwE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-56ddb450d815",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 22,
+ "prompt_tokens": 154,
+ "total_tokens": 176,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "9IipvPESur5Y7"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/59faeeca84b137e9b2c7d310ea47dc01025aeb2ee6203ef478133313e0a0e250.json b/tests/integration/responses/recordings/59faeeca84b137e9b2c7d310ea47dc01025aeb2ee6203ef478133313e0a0e250.json
new file mode 100644
index 000000000..7ab319fb8
--- /dev/null
+++ b/tests/integration/responses/recordings/59faeeca84b137e9b2c7d310ea47dc01025aeb2ee6203ef478133313e0a0e250.json
@@ -0,0 +1,614 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_bearer[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_2lYntxgdJV66JFvD6OuICQCB",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_2lYntxgdJV66JFvD6OuICQCB",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "BNpFmbWkpYEjZX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HdnyHcq2CLvjn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "gOMuwgrp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "OTfqq7Yggw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "cwJMhZJyf5PIp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "54NR7IGiuBTw5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "q1x9cVVPTflQti"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "vcudLe3yaadkvB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "uql1pBt4elRL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "M2kzUEkJctjYp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Waet2ux2zs9P"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "KjbjxdGYUZDuiI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Fg8IXJhJv8iAI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "wiAqPLAoinVhQq"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "vJnb9sE969jph"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5Hgi5CU0aV0sPw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "RDfKhuQo4E4TLXU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "oN1EYVkDbW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-59faeeca84b1",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 17,
+ "prompt_tokens": 188,
+ "total_tokens": 205,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "OfhOTT3VdJ2s7"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/6a05cad89f138e215047fd44d21803c4a397f772ad8b1cb90ec44527ce964a45.json b/tests/integration/responses/recordings/6a05cad89f138e215047fd44d21803c4a397f772ad8b1cb90ec44527ce964a45.json
new file mode 100644
index 000000000..adae894b3
--- /dev/null
+++ b/tests/integration/responses/recordings/6a05cad89f138e215047fd44d21803c4a397f772ad8b1cb90ec44527ce964a45.json
@@ -0,0 +1,614 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_mcp_tool[openai_client-txt=openai/gpt-4o-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_8kf8fNIDcWOelbCmUEcretON",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\",\"celsius\":true}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_8kf8fNIDcWOelbCmUEcretON",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QvigjcdULEdran"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "sIHyVud88f1Ri"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "L46IcJeM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "j0afpRCRBL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "tuzBzZB7jURPj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "iq6vUNVBRuRH5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Nkkz9uUPfhHdqZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "oR3PEQpsXLwYOJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "VBFf1ewix1rj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "yEx3rYoaZjsTw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "I6VR8wzPmnpa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "xld69F07KIb2Yc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "GKgtQZJiWLVKj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1by4tgiJqNgaI1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2RdP6HDQApUpN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "21ABialEpJBCcX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "uoaaRgmiGLD815k"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QKEKTjUUam"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6a05cad89f13",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 17,
+ "prompt_tokens": 195,
+ "total_tokens": 212,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "ceWQr6uzZRuj3"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/6d7f54b7be4845c31ae64498e8018a218bb7f4b8363998abc34ec9bb7ba3a03d.json b/tests/integration/responses/recordings/6d7f54b7be4845c31ae64498e8018a218bb7f4b8363998abc34ec9bb7ba3a03d.json
new file mode 100644
index 000000000..997e18bec
--- /dev/null
+++ b/tests/integration/responses/recordings/6d7f54b7be4845c31ae64498e8018a218bb7f4b8363998abc34ec9bb7ba3a03d.json
@@ -0,0 +1,574 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_mcp_tool_approval[openai_client-txt=openai/gpt-4o-False-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_4ldOwO71od1E0lrdgYQCoe2e",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "TdV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "L5f"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "qo3z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "i3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QdX5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "sJYi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Yk"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "pnS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "y5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Tjs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Cx0I"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-6d7f54b7be48",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 22,
+ "prompt_tokens": 156,
+ "total_tokens": 178,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "bmRrd4XLuhmCv"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/73c9287059db75cd80dc56cff905fe3ff21e6c39189ab93778335439f288158f.json b/tests/integration/responses/recordings/73c9287059db75cd80dc56cff905fe3ff21e6c39189ab93778335439f288158f.json
new file mode 100644
index 000000000..53f1a8125
--- /dev/null
+++ b/tests/integration/responses/recordings/73c9287059db75cd80dc56cff905fe3ff21e6c39189ab93778335439f288158f.json
@@ -0,0 +1,771 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_file_access_check]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need to check if user 'alice' can access the file 'document.txt'. First, get alice's user ID, then check if that user ID can access the file 'document.txt'. Do this as a series of steps, where each step is a separate message. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_EsVvmBUqtJb42kNkYnK19QkJ",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\":\"alice\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_EsVvmBUqtJb42kNkYnK19QkJ",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_12345"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_kCmSE8ORKfQoiEsW2UCYr5Sh",
+ "function": {
+ "arguments": "",
+ "name": "check_file_access"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "sCU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "iHp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "user",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "3b"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_id",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "4hG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "user",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "zX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "WRFf5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "123",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "PvE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "45",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "xak8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\",\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "v"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "filename",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "l7Rfy5le49BJu0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "p"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "document",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "EpFPZH128OUIsw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": ".txt",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Zg"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "jH3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "UubI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-73c9287059db",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 24,
+ "prompt_tokens": 482,
+ "total_tokens": 506,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "GITY7sf69sAJd"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/775a161a318a252454fd44f9850b37c6ec15eb17dfaa95f015dcc6f65fa10c94.json b/tests/integration/responses/recordings/775a161a318a252454fd44f9850b37c6ec15eb17dfaa95f015dcc6f65fa10c94.json
new file mode 100644
index 000000000..c2c8bbd80
--- /dev/null
+++ b/tests/integration/responses/recordings/775a161a318a252454fd44f9850b37c6ec15eb17dfaa95f015dcc6f65fa10c94.json
@@ -0,0 +1,574 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_bearer[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_2lYntxgdJV66JFvD6OuICQCB",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "UmB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ejb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Loxj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "IQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "G"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lo9p"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "YWPA"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "vV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "e0t"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "h2F"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "B9QY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-775a161a318a",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 22,
+ "prompt_tokens": 154,
+ "total_tokens": 176,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "MH88zIptmy2Xs"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/77ad6e42c34823ac51a784cfe4fa0ee18d09bd413189a7c03b24bf3871e3d8d7.json b/tests/integration/responses/recordings/77ad6e42c34823ac51a784cfe4fa0ee18d09bd413189a7c03b24bf3871e3d8d7.json
index b92c67940..dd7884012 100644
--- a/tests/integration/responses/recordings/77ad6e42c34823ac51a784cfe4fa0ee18d09bd413189a7c03b24bf3871e3d8d7.json
+++ b/tests/integration/responses/recordings/77ad6e42c34823ac51a784cfe4fa0ee18d09bd413189a7c03b24bf3871e3d8d7.json
@@ -10,7 +10,7 @@
},
"response": {
"body": {
- "__type__": "llama_stack.apis.tools.tools.ToolInvocationResult",
+ "__type__": "llama_stack_api.tools.ToolInvocationResult",
"__data__": {
"content": "{\"query\": \"latest version of Python\", \"top_k\": [{\"url\": \"https://www.liquidweb.com/blog/latest-python-version/\", \"title\": \"The latest Python version: Python 3.14 - Liquid Web\", \"content\": \"The latest major version, Python 3.14 was officially released on October 7, 2025. Let's explore the key features of Python's current version, how to download\", \"score\": 0.890761, \"raw_content\": null}, {\"url\": \"https://docs.python.org/3/whatsnew/3.14.html\", \"title\": \"What's new in Python 3.14 \\u2014 Python 3.14.0 documentation\", \"content\": \"Python 3.14 is the latest stable release of the Python programming language, with a mix of changes to the language, the implementation, and the standard\", \"score\": 0.8124067, \"raw_content\": null}, {\"url\": \"https://devguide.python.org/versions/\", \"title\": \"Status of Python versions - Python Developer's Guide\", \"content\": \"The main branch is currently the future Python 3.15, and is the only branch that accepts new features. The latest release for each Python version can be found\", \"score\": 0.80089486, \"raw_content\": null}, {\"url\": \"https://www.python.org/doc/versions/\", \"title\": \"Python documentation by version\", \"content\": \"Python 3.12.4, documentation released on 6 June 2024. Python 3.12.3, documentation released on 9 April 2024. Python 3.12.2, documentation released on 6 February\", \"score\": 0.74563974, \"raw_content\": null}, {\"url\": \"https://www.python.org/downloads/\", \"title\": \"Download Python | Python.org\", \"content\": \"Active Python Releases \\u00b7 3.15 pre-release 2026-10-07 (planned) 2031-10 PEP 790 \\u00b7 3.14 bugfix 2025-10-07 2030-10 PEP 745 \\u00b7 3.13 bugfix 2024-10-07 2029-10 PEP 719\", \"score\": 0.6551821, \"raw_content\": null}]}",
"error_message": null,
diff --git a/tests/integration/responses/recordings/9f10c42f1338ae4b535cb877851520db560af78e9bc38159e526b68b8daa168e.json b/tests/integration/responses/recordings/9f10c42f1338ae4b535cb877851520db560af78e9bc38159e526b68b8daa168e.json
new file mode 100644
index 000000000..5c9d6ee91
--- /dev/null
+++ b/tests/integration/responses/recordings/9f10c42f1338ae4b535cb877851520db560af78e9bc38159e526b68b8daa168e.json
@@ -0,0 +1,759 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_sequential_mcp_tool[openai_client-txt=openai/gpt-4o-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_b5k2yeqIi5ucElnnrVPyYU4x",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "AhH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "SMa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "fBD0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "LL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "h"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ySpU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "fra1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Hb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "INi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "jF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\",\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "i"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2dDeK"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "elsius",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "DSb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "true",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "vP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "9boiy"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ZZRa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-9f10c42f1338",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 27,
+ "prompt_tokens": 156,
+ "total_tokens": 183,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "HoutUcx6gZI1g"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/a97d8a2f2fd75b4a5ca732e632b981ca011dd1b6c29df530d12726b1cf7989e5.json b/tests/integration/responses/recordings/a97d8a2f2fd75b4a5ca732e632b981ca011dd1b6c29df530d12726b1cf7989e5.json
new file mode 100644
index 000000000..3ba6af144
--- /dev/null
+++ b/tests/integration/responses/recordings/a97d8a2f2fd75b4a5ca732e632b981ca011dd1b6c29df530d12726b1cf7989e5.json
@@ -0,0 +1,833 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_permissions_workflow]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Help me with this security check: First, get the user ID for 'charlie', then get the permissions for that user ID, and finally check if that user can access 'secret_file.txt'. Stream your progress as you work through each step. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\":\"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_moRBxqnBJ48EWTSEoQ1llgib",
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "arguments": "{\"user_id\":\"user_11111\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_moRBxqnBJ48EWTSEoQ1llgib",
+ "content": [
+ {
+ "type": "text",
+ "text": "admin"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_ybUqAP9oQn3rwQqVdOLs5Wb4",
+ "function": {
+ "arguments": "",
+ "name": "check_file_access"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "xpc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "xXs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "user",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "XY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_id",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HbC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "f"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "user",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Ds"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Osfy3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "111",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ioI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "11",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "GQg6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\",\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "filename",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "b2qqKbGC68nHMB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "H"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "secret",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_file",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": ".txt",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Wz"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ImW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "nRAE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-a97d8a2f2fd7",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 25,
+ "prompt_tokens": 507,
+ "total_tokens": 532,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "rgbYyZ54cN8La"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json b/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json
new file mode 100644
index 000000000..ee32a4396
--- /dev/null
+++ b/tests/integration/responses/recordings/b218af7fa0663e60b12633f54cfddbcf60a1fedd85c501850b9f7e759443809f.json
@@ -0,0 +1,773 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "N5OTLR9CfmU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "3EKK"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"ex",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "R"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "perim",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Q"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ent_na",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "me\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": " \"boi",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "ling_p",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "oint",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Gfk"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "function": {
+ "arguments": "",
+ "name": "get_user_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Yp7IueDs5V"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"us",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ernam",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "X"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "e\": \"c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "2oif8BwVnTCnAF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "harl",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "hv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "ie\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "C"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ctjO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b218af7fa066",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 51,
+ "prompt_tokens": 393,
+ "total_tokens": 444,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "fclbZeBSSKN4C"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json b/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json
new file mode 100644
index 000000000..2f5d2364f
--- /dev/null
+++ b/tests/integration/responses/recordings/b2b5903325356ef0d90af4f2bb8c2a685da5e743820a68de74640451f0072184.json
@@ -0,0 +1,1099 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YYi7jfwMArDwjF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "02OX5OI6tENcr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4WNc0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "tKtJ1sl5pfaDr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Hvj1aWM1rpv8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9E9CvQfqolGi9S"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "j4WB9GjVD9jcfN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "TTDWSqM29LF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AjjxQybBbe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1gVblRiURtILOET"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0R3NJvfpXy2dP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "A7ulc3isZRh1Wy"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "FPq6iOQwJS1aQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Kc20HZgwXltY5rS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "2FCOJr6gSDviM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "`,",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "zcC44JB9JLv8DJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " and",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YkHz4dmGI8Ip"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "WU1FWVwHa8kT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "F89Whppjswq"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "WSOnxHfHCWTqS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "xdc4FO9TTNKE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "815WDeN0y91Hke"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "xp6WP0YmWjNZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "apUUpE3jkpxjm"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "TfCA46aEfur7ddv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4q5btS7EmyGo4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "a5UVTkIvEXtjbH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "UGU1lPYHNno0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4axBUdqWraTmuNf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "111",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ZtMOpwGI78JEH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "11",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "LqPjHcx2BmtLO1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "l5q2xqEWQx4dA4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "sM6qZWT3Vp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b2b590332535",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 32,
+ "prompt_tokens": 465,
+ "total_tokens": 497,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "Nr5ToBPpxyZu4"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b30da63114770b8c975bf66e24aee40546a0658db3df58b9b4d948e4e95b0961.json b/tests/integration/responses/recordings/b30da63114770b8c975bf66e24aee40546a0658db3df58b9b4d948e4e95b0961.json
new file mode 100644
index 000000000..80cce1358
--- /dev/null
+++ b/tests/integration/responses/recordings/b30da63114770b8c975bf66e24aee40546a0658db3df58b9b4d948e4e95b0961.json
@@ -0,0 +1,524 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-experiment_analysis_streaming]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need a complete analysis: First, get the experiment ID for 'chemical_reaction', then get the results for that experiment, and tell me if the yield was above 80%. Return only one tool call per step. Please stream your analysis process."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_Q9Gcxub7UbQsxJWVkiy4FETr",
+ "function": {
+ "arguments": "",
+ "name": "get_experiment_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "c8d"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QoE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "experiment",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1krtmewG8p36"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "P"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "chemical",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "FoS4ov7pi99K5h"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_re",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "BhD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "action",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "KWC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "PFmv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b30da6311477",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 19,
+ "prompt_tokens": 425,
+ "total_tokens": 444,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "NYdC3zepOXLsO"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json b/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json
new file mode 100644
index 000000000..3c9321759
--- /dev/null
+++ b/tests/integration/responses/recordings/b376e47c185753246e6b47e33dd6700e308ebbe9389bc5a1da8f4840fc9031ef.json
@@ -0,0 +1,1099 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "content": [
+ {
+ "type": "text",
+ "text": "user_11111"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "wwHFAiwvH4WszR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9715Kiw8g6FeU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "f3RUP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "uTou0sZw0Trqr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "O3FUhiRX4t3O"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8Row2VeWyXlavX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "R6KU5Aed2Y4hdt"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "aXOqmJlIAIp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AEyQ67P1E9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pxs1ElabWHWYTsE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "f4fvZlQAsoFLb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "XIUUCRzVlWEjdW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "x2dM9CVkT0ICQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Ls8dfHOXPeHjdGE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "RF1hpcOB964EM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "`,",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "QnLWon1Lh1bPrb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " and",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0OHZT5bnbdwa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "jtbU7bWjfj72"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "nCopvKj1JIE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "2ZDuFZoCixweF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "u3QmR0zYiExg"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "z6tGgyH3Gw667d"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HalCDTgB5QRV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "5UJBpMTsZMjVF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "p8zU7xEpcUR63Lh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "t0fKxlCyUxaFU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lRSEHqi9mVmVZJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8C6DeNABBjpJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "L4qXmW7bonqcf97"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "111",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "zje3cRhC3fzKb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "11",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "NgeVi1nYcUbkmN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "d83dlilKTeA1RE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HnPRpNWz4n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b376e47c1857",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 32,
+ "prompt_tokens": 465,
+ "total_tokens": 497,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "sfrloH58kmZpA"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/b6b7282ca0ad5a3c59321d2b045a91ebca1cbaeb4f7aab22c5b9e246b476272f.json b/tests/integration/responses/recordings/b6b7282ca0ad5a3c59321d2b045a91ebca1cbaeb4f7aab22c5b9e246b476272f.json
new file mode 100644
index 000000000..040998a3b
--- /dev/null
+++ b/tests/integration/responses/recordings/b6b7282ca0ad5a3c59321d2b045a91ebca1cbaeb4f7aab22c5b9e246b476272f.json
@@ -0,0 +1,649 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_sequential_mcp_tool[openai_client-txt=openai/gpt-4o-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_b5k2yeqIi5ucElnnrVPyYU4x",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\",\"celsius\":true}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_b5k2yeqIi5ucElnnrVPyYU4x",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "The boiling point of \"myawesomeliquid\" is -100 degrees Celsius."
+ },
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "7S5XpbMeFTTZba"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "G4KYajpQCgm5p"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "krw8d3Np"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "sOEsvVtCEV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5eAw89OUrx7VT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "PFghmTocqCYea"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "IRJRbKIoXwNh0e"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "wuoL6MoA21KfMP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "DLRS3D5YVekk"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "PQZQlOncwl01F"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "TVfNNxYtZgXQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "LscPqJGnbMf6Qw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "X8NSrxHcpYYXL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5nfdb4DuFapoeT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "K2qXQYFAd591w"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " degrees",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "b0rvHdF1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": " Celsius",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kFoGt52c"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "SJjhJwz2zgz693C"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "MityMxFgBz"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-b6b7282ca0ad",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 18,
+ "prompt_tokens": 234,
+ "total_tokens": 252,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "qf0j6dzuNPifV"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json b/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json
new file mode 100644
index 000000000..821bd20c4
--- /dev/null
+++ b/tests/integration/responses/recordings/c1b953d78e040ae516301c6dd5004cf049a522bd106852b6d09e9baf41df88d3.json
@@ -0,0 +1,1634 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_y8S7JKR2Qhu4Bh1uxdHRcNDg",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_HELkyZOm2fzLx2CeTH3bEcS2",
+ "content": "Tool call skipped: maximum tool calls limit (1) reached."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9zm2knPUrQf9Ti"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "dBZWt7n0cY28K"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "gBkUe"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "DK27AidkjJEUs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "BvRS3fe55saU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Q30TpKRJ8sqbaj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "uZIcYxencsPVq7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "OTlywqpO2gu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1D39HJt78o"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "z9q3XLiA1zUj69i"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YilL3DwdzhGNE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "yLvB3LVIF9yqTB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "aQ2ZgA6wBrzgb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0jzpzruxw3CNxO3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Wl5Eu8yWUoj2V"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "F3a7FpN1N5MOoL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " However",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "oC3Sc1Oj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": ",",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "dR3KxirqoL6RMvN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " I",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HDIUF9MxNvDNC8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " wasn't",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "jvYMbj7Jb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " able",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "wA25F90roLY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1kP6AeTeGmGNU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " get",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "8zixGSMc9fiH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "UCSCTgIKkLiT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1hHm53qitSi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "N3NBeCvE43ZRW"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ul7bMYRpL04n"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ABgwNSe6WHqE9N"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "6q5tAeJOMEC8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "gxcccAWJYWckn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "qpqi3k54AaZDnNH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " due",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "OB5oYuchm2uE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "MKHpNGKsdWpLO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " a",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "zYt4J00NPy69fJ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " tool",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Z0kM0bozww8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " call",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "qbQA28Mr3PO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " limit",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ZzevZnpsYj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "QBno7Vj0QhMrSjO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " Please",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "hEj0RemlE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " let",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "xN8xRqzcxXCR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " me",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "0LxJ9leKvCunj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " know",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "KoHcgiBEVc6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " if",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "eT2hCjpvISlxh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " you",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9LJdcoWEzgMP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " would",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "bxChZ0IYYP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " like",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "oU5UBQRKEpI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " me",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "HQHzzykuhNV7v"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YJ86yXpqctfF5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " attempt",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ToTM0n5O"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " that",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "SateSvqBggb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": " again",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "APRnnp4Qce"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Xe9yNJcVnFP4PZl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ZH7NR5wSoI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c1b953d78e04",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 52,
+ "prompt_tokens": 474,
+ "total_tokens": 526,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "2P0uXrABC0X8d"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/c27df465b2996c4d7c909e9ccfac53deb1ac47d064a1b5c70a78b7436438818f.json b/tests/integration/responses/recordings/c27df465b2996c4d7c909e9ccfac53deb1ac47d064a1b5c70a78b7436438818f.json
new file mode 100644
index 000000000..c79ed1010
--- /dev/null
+++ b/tests/integration/responses/recordings/c27df465b2996c4d7c909e9ccfac53deb1ac47d064a1b5c70a78b7436438818f.json
@@ -0,0 +1,450 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-user_permissions_workflow]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Help me with this security check: First, get the user ID for 'charlie', then get the permissions for that user ID, and finally check if that user can access 'secret_file.txt'. Stream your progress as you work through each step. Return only one tool call per step. Summarize the final result with a single 'yes' or 'no' response."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_fsxGbKmceUbLSXCe4sx9WLXO",
+ "function": {
+ "arguments": "",
+ "name": "get_user_id"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "sOa6fZEKZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HBO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "username",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "7kcXlaglccmA8a"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "a"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "char",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "bS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "lie",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "d2e"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "fhE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "SlsZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c27df465b299",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 16,
+ "prompt_tokens": 449,
+ "total_tokens": 465,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "fjMWRTbF1Ni06"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/c84e894f47a6d7f4d4556829d24ea14cd2869c77972c33e66d9b42438e2165cd.json b/tests/integration/responses/recordings/c84e894f47a6d7f4d4556829d24ea14cd2869c77972c33e66d9b42438e2165cd.json
new file mode 100644
index 000000000..37a29324e
--- /dev/null
+++ b/tests/integration/responses/recordings/c84e894f47a6d7f4d4556829d24ea14cd2869c77972c33e66d9b42438e2165cd.json
@@ -0,0 +1,574 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_bearer[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_mitVYvmPaFfoSmKjzKo5xmZp",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "5Y1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QzQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "4NPm"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Lh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "r"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "w"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "GSVa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "AWZm"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "DG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1Bw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Oq"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "cI8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kKqh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c84e894f47a6",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 22,
+ "prompt_tokens": 154,
+ "total_tokens": 176,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "etTUytEvlkJ99"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/c9c723cd01233311d9033f55d6db610b38555bb86f93c507ede8752af47cda6a.json b/tests/integration/responses/recordings/c9c723cd01233311d9033f55d6db610b38555bb86f93c507ede8752af47cda6a.json
new file mode 100644
index 000000000..e98f64b93
--- /dev/null
+++ b/tests/integration/responses/recordings/c9c723cd01233311d9033f55d6db610b38555bb86f93c507ede8752af47cda6a.json
@@ -0,0 +1,574 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_backward_compatibility[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_wnbihJuwYAfnI8uxy84Yl48j",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "TC0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "hDL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "4G8Z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ow"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "P"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "M"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "yhAk"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "SdIN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2z"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "nEC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2B"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "DoL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "cSRf"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-c9c723cd0123",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 22,
+ "prompt_tokens": 154,
+ "total_tokens": 176,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "ejlSF0NzXFFso"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json b/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json
new file mode 100644
index 000000000..450d84176
--- /dev/null
+++ b/tests/integration/responses/recordings/d073f434d28c2f72bea92232de0de4d4f415f237e22b2b6983677a1e1319a0d3.json
@@ -0,0 +1,593 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_function_tools[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Can you tell me the weather in Paris and the current time?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather information for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "type": "function",
+ "name": "get_time",
+ "description": "Get current time for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')"
+ }
+ }
+ },
+ "strict": null
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "iUduPiCYBRb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_Wv3G8aEQOJLNXGRaK3hAWzq3",
+ "function": {
+ "arguments": "",
+ "name": "get_weather"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "cqZKgzm65y"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "L"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "zbBLzavvnEdLz0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "Gj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "LQo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": "call_8xkOmOgJpV77n5W2dSx6ytW6",
+ "function": {
+ "arguments": "",
+ "name": "get_time"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "eltoncGlxI8Go"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "{\"lo",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "S"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "catio",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "N"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "n\": \"P",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "2bTn1MaAXYFoVK"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "aris",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "VF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 1,
+ "id": null,
+ "function": {
+ "arguments": "\"}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "BHi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": null,
+ "obfuscation": "WaYG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d073f434d28c",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_b1442291a8",
+ "usage": {
+ "completion_tokens": 44,
+ "prompt_tokens": 110,
+ "total_tokens": 154,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "aevj6ZWLqfCK6"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/d35c1244fbbe9898da3958113c1d054d5f5dd6bdd3c4333db6cef7361fb32feb.json b/tests/integration/responses/recordings/d35c1244fbbe9898da3958113c1d054d5f5dd6bdd3c4333db6cef7361fb32feb.json
new file mode 100644
index 000000000..a41104fd5
--- /dev/null
+++ b/tests/integration/responses/recordings/d35c1244fbbe9898da3958113c1d054d5f5dd6bdd3c4333db6cef7361fb32feb.json
@@ -0,0 +1,759 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_mcp_tool[openai_client-txt=openai/gpt-4o-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_8kf8fNIDcWOelbCmUEcretON",
+ "function": {
+ "arguments": "",
+ "name": "get_boiling_point"
+ },
+ "type": "function"
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1xG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "{\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "RQj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "li",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "XncI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "86"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "_name",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "L"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "my",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lnSu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "aw",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ksr1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "esom",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "CU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "eli",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "hrv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "quid",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "K9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\",\"",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "a"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "c",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "LKw52"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "elsius",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": ""
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "\":",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "yGY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "true",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "wC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": null,
+ "function": {
+ "arguments": "}",
+ "name": null
+ },
+ "type": null
+ }
+ ]
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8fF8B"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "tool_calls",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "bbwp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d35c1244fbbe",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 27,
+ "prompt_tokens": 156,
+ "total_tokens": 183,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "k0bo4JwUfLNKW"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/d42e1020edee86d9f6da7df909c2a453cb8f2e11e80beb8e5506439345c428eb.json b/tests/integration/responses/recordings/d42e1020edee86d9f6da7df909c2a453cb8f2e11e80beb8e5506439345c428eb.json
new file mode 100644
index 000000000..610fe96b1
--- /dev/null
+++ b/tests/integration/responses/recordings/d42e1020edee86d9f6da7df909c2a453cb8f2e11e80beb8e5506439345c428eb.json
@@ -0,0 +1,808 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-experiment_analysis_streaming]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need a complete analysis: First, get the experiment ID for 'chemical_reaction', then get the results for that experiment, and tell me if the yield was above 80%. Return only one tool call per step. Please stream your analysis process."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_Q9Gcxub7UbQsxJWVkiy4FETr",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\":\"chemical_reaction\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_Q9Gcxub7UbQsxJWVkiy4FETr",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_003"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_yTMuQEKu7x115q8XvhqelRub",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "arguments": "{\"experiment_id\":\"exp_003\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_yTMuQEKu7x115q8XvhqelRub",
+ "content": [
+ {
+ "type": "text",
+ "text": "Yield: 85%, Status: Complete"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "7yA3503fehs27D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "T95BeWrgJQMHt"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " yield",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "VveNEnHuMQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "KupSssWahehO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Ogot8KLW0IXw"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "dYKJ6jPstuAso4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "chemical",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "wcSKhZVd"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "_re",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "6ZlTlRGLyclHo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "action",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "WpYqOmrhXr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "qUhq7HrrwdFEyuY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "WWO2y"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "pFVMO1BRN37n4"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " ",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "TtQlcHeU2mPl830"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "85",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "zyw8OdA0pXZCp5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "%,",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "VcHVTGGXrqvev1"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " which",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "FI9FAA2rX6"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Cc65gPYGA6Xfd"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " above",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "T7BlLMIQGs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": " ",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2oKThCybRdG8MzZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "80",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QHWdJWXK6hzQVS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": "%.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lJnplmQYyl0SL3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "NPaAVrOB4J"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-d42e1020edee",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 21,
+ "prompt_tokens": 494,
+ "total_tokens": 515,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "ngidabPDDHECm"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/db81127157a8364ce8f7a81e10d9b84bf814950e3c8f11eed7ed9f11d4462237.json b/tests/integration/responses/recordings/db81127157a8364ce8f7a81e10d9b84bf814950e3c8f11eed7ed9f11d4462237.json
new file mode 100644
index 000000000..67c78f3ed
--- /dev/null
+++ b/tests/integration/responses/recordings/db81127157a8364ce8f7a81e10d9b84bf814950e3c8f11eed7ed9f11d4462237.json
@@ -0,0 +1,614 @@
+{
+ "test_id": "tests/integration/responses/test_mcp_authentication.py::test_mcp_authorization_backward_compatibility[client_with_models-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_wnbihJuwYAfnI8uxy84Yl48j",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_wnbihJuwYAfnI8uxy84Yl48j",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Usdowqbd6beiYB"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "nVevItSH27TBR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HWyYtVAl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "kvvcut6Eib"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "E0osAbGBpCPvy"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "GmH7m44fmv0Mk"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "oJ4DV7z5GiqJqX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8AmNNAYPXMNrEr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "JEzK8X8AD9hP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8EGj5LyQzpZMt"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "wQG19uBuvC7j"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "8Wyenb7E997f9E"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "SVXiel7RHA6f3"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ynScunJEjmOWBo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "po2PLlPavc9TN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "mt2jiL22pWkH93"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "32gJJ61zmjmftOn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HszNIiCJ12"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-db81127157a8",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 17,
+ "prompt_tokens": 188,
+ "total_tokens": 205,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "cAx3IDg7toBDJ"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/e2dc09dc546d9b8b99096804fe75fae1f1eb09efe6e4f86c115a78a3db5a59bc.json b/tests/integration/responses/recordings/e2dc09dc546d9b8b99096804fe75fae1f1eb09efe6e4f86c115a78a3db5a59bc.json
new file mode 100644
index 000000000..ce771f24e
--- /dev/null
+++ b/tests/integration/responses/recordings/e2dc09dc546d9b8b99096804fe75fae1f1eb09efe6e4f86c115a78a3db5a59bc.json
@@ -0,0 +1,668 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_mcp_tool_approval[openai_client-txt=openai/gpt-4o-True-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_bL84OWNnE1s75GJEqGLAK35W",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\",\"celsius\":true}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_bL84OWNnE1s75GJEqGLAK35W",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "STnb1nbwTsG4JZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "aEUUYMIYjnZpH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "2QzI8Zau"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "gZw7vp0bnu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "TYru3DcfZVc6B"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "h5P3cluszFa21"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ggSDGSgtWOR3d9"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "lm72CS5Lt7lW76"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "fKXRsLB1CG0e"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "JxZBNjkfyXquH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "egtKHFRBAqZn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "R7MdHaS5Rj2mMV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " in",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "LydsYLrAIj6PU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " Celsius",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "4MmAUDk0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Ivlu4M0VfRH8b"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "OfTmU32oCtMsuo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "IUbbHa5oyIPjr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "llluAF0LBNJIwi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "LnUC3LPx43OfUbC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ULfebGmmMn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e2dc09dc546d",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 19,
+ "prompt_tokens": 195,
+ "total_tokens": 214,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "w11BVXjZVXRtg"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json b/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json
new file mode 100644
index 000000000..089242af3
--- /dev/null
+++ b/tests/integration/responses/recordings/e3e2e64c57bb36f2a6ba5f68410d0b947d35c870ff825f06d8997a84dca1f5bf.json
@@ -0,0 +1,1661 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_max_tool_calls_with_mcp_tools[openai_client-txt=openai/gpt-4o]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\": \"boiling_point\"}"
+ }
+ },
+ {
+ "index": 1,
+ "id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "arguments": "{\"username\": \"charlie\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_z8P1RQv54BLxyMlRdMFkcCGd",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_I5tcLgyMADoVwLKDj9HkTCs5",
+ "content": "Tool call skipped: maximum tool calls limit (1) reached."
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "uoj10MYhhjCsjQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "RbrwfJ20BVqRi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "88xHU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lXhzWF230RZCL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "McIrBR2XVfyS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "7SiItrYff13YKr"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pf232bD4VeXdXc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "z0kyzhP7ioh"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "3TUkmyiT28"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "kFAkj6BHwM6YKZQ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "fiRWSM9LNpP4J"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " `",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "VRPBkgW9PrA6C7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "exp",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "YqSi9vVuexh3e"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "_",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "y64suQvx1Nfp8Pj"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "004",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "kouF1KXaF3fSv"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "`.",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Ju1xHmwme71tPA"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " However",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "TZuAhRJ8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": ",",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ikVKxLAdOhUPHHa"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " I",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pntThOzs2GzlYs"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " couldn't",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "v4ihoTx"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " retrieve",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "476NjPo"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AFDAUQw3ezkM"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " user",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ztweLiyDuwu"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " ID",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "q575s9DLRlXDL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "oEoKwHu8H1FD"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "KOgPjHTbZYg83A"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "char",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "PmTsVhsBBtRV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "lie",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "hkXsP7qhxNrQ0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "C9RtrovVHvrH33B"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " at",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "fhJHhlmbEWrnY"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " this",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "pvYlADlLGnc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " time",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "N787ynNkyIU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " due",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lkX5gCjexTSI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " to",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ecopEBh7Ckmai"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " a",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "Nf1X9c8Z4TduoA"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " tool",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "MtnVKdm0UnR"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " call",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "ExJ8aBPckoF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " limitation",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "jE7bT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "AaaLnYdPLucETYH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " Please",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "cPsBAfFXF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " let",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "nGUo5AX3lQpP"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " me",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "shpHT1JYFdHrS"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " know",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "RG8m7peAEPl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " if",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "i4q8OeCvU08qi"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " there's",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "lXBbPXWn"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " anything",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "EyZRgWl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " else",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "h87NDUy4I75"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " I",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "1CJqPAnvuBVEXV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " can",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "9Ava6GiwMlu5"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " assist",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "fl9TQoNlV"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " you",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "4PwMuL1TPPvZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": " with",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "XeIvTn2s7ap"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": "!",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "U93F4p2ENgwWFKN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": null,
+ "obfuscation": "3P0Kp8n8xH"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e3e2e64c57bb",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_c98e05ca17",
+ "usage": {
+ "completion_tokens": 53,
+ "prompt_tokens": 474,
+ "total_tokens": 527,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "zjt0xUw7Sz8p9"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/e9f1cc3da4297f143b7b2a4b21b34cf2f55727b67c1e1854a106b9d8c7c64b70.json b/tests/integration/responses/recordings/e9f1cc3da4297f143b7b2a4b21b34cf2f55727b67c1e1854a106b9d8c7c64b70.json
new file mode 100644
index 000000000..f8472055f
--- /dev/null
+++ b/tests/integration/responses/recordings/e9f1cc3da4297f143b7b2a4b21b34cf2f55727b67c1e1854a106b9d8c7c64b70.json
@@ -0,0 +1,700 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_non_streaming_multi_turn_tool_execution[openai_client-txt=openai/gpt-4o-experiment_results_lookup]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "I need to get the results for the 'boiling_point' experiment. First, get the experiment ID for 'boiling_point', then use that ID to get the experiment results. Tell me the boiling point in Celsius."
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_dZwjBxH3aTRhnaS0bJVPqRcz",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "arguments": "{\"experiment_name\":\"boiling_point\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_dZwjBxH3aTRhnaS0bJVPqRcz",
+ "content": [
+ {
+ "type": "text",
+ "text": "exp_004"
+ }
+ ]
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_skNUKbERbtdoADH834U9OE91",
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "arguments": "{\"experiment_id\":\"exp_004\"}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_skNUKbERbtdoADH834U9OE91",
+ "content": [
+ {
+ "type": "text",
+ "text": "Boiling Point: 100\u00b0C, Status: Verified"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_id",
+ "description": "\n Get the user ID for a given username. This ID is needed for other operations.\n\n :param username: The username to look up\n :return: The user ID for the username\n ",
+ "parameters": {
+ "properties": {
+ "username": {
+ "title": "Username",
+ "type": "string"
+ }
+ },
+ "required": [
+ "username"
+ ],
+ "title": "get_user_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_user_permissions",
+ "description": "\n Get the permissions for a user ID. Requires a valid user ID from get_user_id.\n\n :param user_id: The user ID to check permissions for\n :return: The permissions for the user\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id"
+ ],
+ "title": "get_user_permissionsArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "check_file_access",
+ "description": "\n Check if a user can access a specific file. Requires a valid user ID.\n\n :param user_id: The user ID to check access for\n :param filename: The filename to check access to\n :return: Whether the user can access the file (yes/no)\n ",
+ "parameters": {
+ "properties": {
+ "user_id": {
+ "title": "User Id",
+ "type": "string"
+ },
+ "filename": {
+ "title": "Filename",
+ "type": "string"
+ }
+ },
+ "required": [
+ "user_id",
+ "filename"
+ ],
+ "title": "check_file_accessArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_id",
+ "description": "\n Get the experiment ID for a given experiment name. This ID is needed to get results.\n\n :param experiment_name: The name of the experiment\n :return: The experiment ID\n ",
+ "parameters": {
+ "properties": {
+ "experiment_name": {
+ "title": "Experiment Name",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_name"
+ ],
+ "title": "get_experiment_idArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_experiment_results",
+ "description": "\n Get the results for an experiment ID. Requires a valid experiment ID from get_experiment_id.\n\n :param experiment_id: The experiment ID to get results for\n :return: The experiment results\n ",
+ "parameters": {
+ "properties": {
+ "experiment_id": {
+ "title": "Experiment Id",
+ "type": "string"
+ }
+ },
+ "required": [
+ "experiment_id"
+ ],
+ "title": "get_experiment_resultsArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "OzNg5nfMI5VouN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "EBvjjqFPfytPb"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "HhEiLgKg"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "hLc2aAgg1D"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " for",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "q3AsmJJ6Rvyt"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " the",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "4QJrcjxcuFLd"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " experiment",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "BQQJ8"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " '",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "nj2SOixVU5KocZ"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "bo",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ookLm9qkLqQQ3M"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "iling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "J4axWnSRvQU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "_point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "QG6jvQWF8t"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "'",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "veUGdbLd3d8r2yU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "ZOCkbhGksYmsF"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": " ",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "fbNuaYkAA8gREQ7"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "3rdZxDq7QoXcl"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": "\u00b0C",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "upjHViB9dUBWAd"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "hBZNqRjyLGCIMjg"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "PrtgvDwRZp"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-e9f1cc3da429",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 17,
+ "prompt_tokens": 490,
+ "total_tokens": 507,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "euYYBnLE4Mj0Z"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/recordings/ed89b57fec937fa8602b4911a21a9a1a9488fb2347bf73d6e3bc2203a9a47a61.json b/tests/integration/responses/recordings/ed89b57fec937fa8602b4911a21a9a1a9488fb2347bf73d6e3bc2203a9a47a61.json
new file mode 100644
index 000000000..d8d87a16e
--- /dev/null
+++ b/tests/integration/responses/recordings/ed89b57fec937fa8602b4911a21a9a1a9488fb2347bf73d6e3bc2203a9a47a61.json
@@ -0,0 +1,641 @@
+{
+ "test_id": "tests/integration/responses/test_tool_responses.py::test_response_sequential_mcp_tool[openai_client-txt=openai/gpt-4o-boiling_point_tool]",
+ "request": {
+ "method": "POST",
+ "url": "https://api.openai.com/v1/v1/chat/completions",
+ "headers": {},
+ "body": {
+ "model": "gpt-4o",
+ "messages": [
+ {
+ "role": "user",
+ "content": "What is the boiling point of myawesomeliquid in Celsius?"
+ },
+ {
+ "role": "assistant",
+ "content": "",
+ "tool_calls": [
+ {
+ "index": 0,
+ "id": "call_b5k2yeqIi5ucElnnrVPyYU4x",
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "arguments": "{\"liquid_name\":\"myawesomeliquid\",\"celsius\":true}"
+ }
+ }
+ ]
+ },
+ {
+ "role": "tool",
+ "tool_call_id": "call_b5k2yeqIi5ucElnnrVPyYU4x",
+ "content": [
+ {
+ "type": "text",
+ "text": "-100"
+ }
+ ]
+ }
+ ],
+ "stream": true,
+ "stream_options": {
+ "include_usage": true
+ },
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "greet_everyone",
+ "parameters": {
+ "properties": {
+ "url": {
+ "title": "Url",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "title": "greet_everyoneArguments",
+ "type": "object"
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "get_boiling_point",
+ "description": "\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n ",
+ "parameters": {
+ "properties": {
+ "liquid_name": {
+ "title": "Liquid Name",
+ "type": "string"
+ },
+ "celsius": {
+ "default": true,
+ "title": "Celsius",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "liquid_name"
+ ],
+ "title": "get_boiling_pointArguments",
+ "type": "object"
+ }
+ }
+ }
+ ]
+ },
+ "endpoint": "/v1/chat/completions",
+ "model": "gpt-4o"
+ },
+ "response": {
+ "body": [
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "",
+ "function_call": null,
+ "refusal": null,
+ "role": "assistant",
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "WGXCgkwfwMDUCG"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "The",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "pkdvw6gGNrtXN"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " boiling",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "RO5YJeZc"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " point",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "riZZHSDEz0"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " of",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "1zjk8zIdt2Y2b"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " \"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "XGHv0dlif7IrC"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "my",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Ii2KeTyV3U0uiU"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "aw",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "3OyYvSytdOYhpT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "esom",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "zCnXbjW4JE6l"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "eli",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "0bwcz2K91q7EO"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "quid",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Um0jFlJegpXI"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "\"",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "4OllZlS2JmoD3l"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " is",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "x4jApO80AyXpX"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " -",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "wq0D3Wzc1l3h6S"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": "100",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Dn78V58iZ9wKK"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " degrees",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "fjHDBTqT"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": " Celsius",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "Cnp6KULL"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": ".",
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": null,
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "grbygHexDT4JwGx"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [
+ {
+ "delta": {
+ "content": null,
+ "function_call": null,
+ "refusal": null,
+ "role": null,
+ "tool_calls": null
+ },
+ "finish_reason": "stop",
+ "index": 0,
+ "logprobs": null
+ }
+ ],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": null,
+ "obfuscation": "upSRpiQQKE"
+ }
+ },
+ {
+ "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
+ "__data__": {
+ "id": "rec-ed89b57fec93",
+ "choices": [],
+ "created": 0,
+ "model": "gpt-4o-2024-08-06",
+ "object": "chat.completion.chunk",
+ "service_tier": "default",
+ "system_fingerprint": "fp_cbf1785567",
+ "usage": {
+ "completion_tokens": 18,
+ "prompt_tokens": 195,
+ "total_tokens": 213,
+ "completion_tokens_details": {
+ "accepted_prediction_tokens": 0,
+ "audio_tokens": 0,
+ "reasoning_tokens": 0,
+ "rejected_prediction_tokens": 0
+ },
+ "prompt_tokens_details": {
+ "audio_tokens": 0,
+ "cached_tokens": 0
+ }
+ },
+ "obfuscation": "psE6Es6zZ2Kz4"
+ }
+ }
+ ],
+ "is_streaming": true
+ },
+ "id_normalization_mapping": {}
+}
diff --git a/tests/integration/responses/test_basic_responses.py b/tests/integration/responses/test_basic_responses.py
index a764084af..d72a43375 100644
--- a/tests/integration/responses/test_basic_responses.py
+++ b/tests/integration/responses/test_basic_responses.py
@@ -13,8 +13,8 @@ from .streaming_assertions import StreamingValidator
@pytest.mark.parametrize("case", basic_test_cases)
-def test_response_non_streaming_basic(compat_client, text_model_id, case):
- response = compat_client.responses.create(
+def test_response_non_streaming_basic(responses_client, text_model_id, case):
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
stream=False,
@@ -31,10 +31,10 @@ def test_response_non_streaming_basic(compat_client, text_model_id, case):
"Total tokens should equal input + output tokens"
)
- retrieved_response = compat_client.responses.retrieve(response_id=response.id)
+ retrieved_response = responses_client.responses.retrieve(response_id=response.id)
assert retrieved_response.output_text == response.output_text
- next_response = compat_client.responses.create(
+ next_response = responses_client.responses.create(
model=text_model_id,
input="Repeat your previous response in all caps.",
previous_response_id=response.id,
@@ -44,8 +44,8 @@ def test_response_non_streaming_basic(compat_client, text_model_id, case):
@pytest.mark.parametrize("case", basic_test_cases)
-def test_response_streaming_basic(compat_client, text_model_id, case):
- response = compat_client.responses.create(
+def test_response_streaming_basic(responses_client, text_model_id, case):
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
stream=True,
@@ -98,15 +98,15 @@ def test_response_streaming_basic(compat_client, text_model_id, case):
validator.assert_response_consistency()
# Verify stored response matches streamed response
- retrieved_response = compat_client.responses.retrieve(response_id=response_id)
+ retrieved_response = responses_client.responses.retrieve(response_id=response_id)
final_event = events[-1]
assert retrieved_response.output_text == final_event.response.output_text
@pytest.mark.parametrize("case", basic_test_cases)
-def test_response_streaming_incremental_content(compat_client, text_model_id, case):
+def test_response_streaming_incremental_content(responses_client, text_model_id, case):
"""Test that streaming actually delivers content incrementally, not just at the end."""
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
stream=True,
@@ -170,10 +170,10 @@ def test_response_streaming_incremental_content(compat_client, text_model_id, ca
@pytest.mark.parametrize("case", multi_turn_test_cases)
-def test_response_non_streaming_multi_turn(compat_client, text_model_id, case):
+def test_response_non_streaming_multi_turn(responses_client, text_model_id, case):
previous_response_id = None
for turn_input, turn_expected in case.turns:
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=turn_input,
previous_response_id=previous_response_id,
@@ -184,8 +184,8 @@ def test_response_non_streaming_multi_turn(compat_client, text_model_id, case):
@pytest.mark.parametrize("case", image_test_cases)
-def test_response_non_streaming_image(compat_client, text_model_id, case):
- response = compat_client.responses.create(
+def test_response_non_streaming_image(responses_client, text_model_id, case):
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
stream=False,
@@ -195,10 +195,10 @@ def test_response_non_streaming_image(compat_client, text_model_id, case):
@pytest.mark.parametrize("case", multi_turn_image_test_cases)
-def test_response_non_streaming_multi_turn_image(compat_client, text_model_id, case):
+def test_response_non_streaming_multi_turn_image(responses_client, text_model_id, case):
previous_response_id = None
for turn_input, turn_expected in case.turns:
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=turn_input,
previous_response_id=previous_response_id,
diff --git a/tests/integration/responses/test_conversation_responses.py b/tests/integration/responses/test_conversation_responses.py
index ef7ea7c4e..ce249f6a0 100644
--- a/tests/integration/responses/test_conversation_responses.py
+++ b/tests/integration/responses/test_conversation_responses.py
@@ -65,8 +65,14 @@ class TestConversationResponses:
conversation_items = openai_client.conversations.items.list(conversation.id)
assert len(conversation_items.data) >= 4 # 2 user + 2 assistant messages
+ @pytest.mark.timeout(60, method="thread")
def test_conversation_context_loading(self, openai_client, text_model_id):
- """Test that conversation context is properly loaded for responses."""
+ """Test that conversation context is properly loaded for responses.
+
+ Note: 60s timeout added due to CI-specific deadlock in pytest/OpenAI client/httpx
+ after running 25+ tests. Hangs before first HTTP request is made. Works fine locally.
+ Investigation needed: connection pool exhaustion or event loop state issue.
+ """
conversation = openai_client.conversations.create(
items=[
{"type": "message", "role": "user", "content": "My name is Alice. I like to eat apples."},
@@ -82,6 +88,7 @@ class TestConversationResponses:
assert "apple" in response.output_text.lower()
+ @pytest.mark.timeout(60, method="thread")
def test_conversation_error_handling(self, openai_client, text_model_id):
"""Test error handling for invalid and nonexistent conversations."""
# Invalid conversation ID format
@@ -125,18 +132,18 @@ class TestConversationResponses:
assert len(response.output_text.strip()) > 0
# this is not ready yet
- # def test_conversation_compat_client(self, compat_client, text_model_id):
+ # def test_conversation_compat_client(self, responses_client, text_model_id):
# """Test conversation parameter works with compatibility client."""
- # if not hasattr(compat_client, "conversations"):
- # pytest.skip("compat_client does not support conversations API")
+ # if not hasattr(responses_client, "conversations"):
+ # pytest.skip("responses_client does not support conversations API")
#
- # conversation = compat_client.conversations.create()
- # response = compat_client.responses.create(
+ # conversation = responses_client.conversations.create()
+ # response = responses_client.responses.create(
# model=text_model_id, input="Tell me a joke", conversation=conversation.id
# )
#
# assert response is not None
# assert len(response.output_text.strip()) > 0
#
- # conversation_items = compat_client.conversations.items.list(conversation.id)
+ # conversation_items = responses_client.conversations.items.list(conversation.id)
# assert len(conversation_items.data) >= 2
diff --git a/tests/integration/responses/test_file_search.py b/tests/integration/responses/test_file_search.py
index dde5fd7f6..b2a634fb0 100644
--- a/tests/integration/responses/test_file_search.py
+++ b/tests/integration/responses/test_file_search.py
@@ -9,8 +9,6 @@ import time
import pytest
-from llama_stack.core.library_client import LlamaStackAsLibraryClient
-
from .helpers import new_vector_store, upload_file
@@ -28,12 +26,9 @@ from .helpers import new_vector_store, upload_file
},
],
)
-def test_response_text_format(compat_client, text_model_id, text_format):
- if isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("Responses API text format is not yet supported in library client.")
-
+def test_response_text_format(responses_client, text_model_id, text_format):
stream = False
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="What is the capital of France?",
stream=stream,
@@ -47,13 +42,10 @@ def test_response_text_format(compat_client, text_model_id, text_format):
@pytest.fixture
-def vector_store_with_filtered_files(compat_client, embedding_model_id, embedding_dimension, tmp_path_factory):
+def vector_store_with_filtered_files(responses_client, embedding_model_id, embedding_dimension, tmp_path_factory):
# """Create a vector store with multiple files that have different attributes for filtering tests."""
- if isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("upload_file() is not yet supported in library client somehow?")
-
vector_store = new_vector_store(
- compat_client, "test_vector_store_with_filters", embedding_model_id, embedding_dimension
+ responses_client, "test_vector_store_with_filters", embedding_model_id, embedding_dimension
)
tmp_path = tmp_path_factory.mktemp("filter_test_files")
@@ -104,11 +96,11 @@ def vector_store_with_filtered_files(compat_client, embedding_model_id, embeddin
file_path.write_text(file_data["content"])
# Upload file
- file_response = upload_file(compat_client, file_data["name"], str(file_path))
+ file_response = upload_file(responses_client, file_data["name"], str(file_path))
file_ids.append(file_response.id)
# Attach file to vector store with attributes
- file_attach_response = compat_client.vector_stores.files.create(
+ file_attach_response = responses_client.vector_stores.files.create(
vector_store_id=vector_store.id,
file_id=file_response.id,
attributes=file_data["attributes"],
@@ -117,7 +109,7 @@ def vector_store_with_filtered_files(compat_client, embedding_model_id, embeddin
# Wait for attachment
while file_attach_response.status == "in_progress":
time.sleep(0.1)
- file_attach_response = compat_client.vector_stores.files.retrieve(
+ file_attach_response = responses_client.vector_stores.files.retrieve(
vector_store_id=vector_store.id,
file_id=file_response.id,
)
@@ -127,17 +119,17 @@ def vector_store_with_filtered_files(compat_client, embedding_model_id, embeddin
# Cleanup: delete vector store and files
try:
- compat_client.vector_stores.delete(vector_store_id=vector_store.id)
+ responses_client.vector_stores.delete(vector_store_id=vector_store.id)
for file_id in file_ids:
try:
- compat_client.files.delete(file_id=file_id)
+ responses_client.files.delete(file_id=file_id)
except Exception:
pass # File might already be deleted
except Exception:
pass # Best effort cleanup
-def test_response_file_search_filter_by_region(compat_client, text_model_id, vector_store_with_filtered_files):
+def test_response_file_search_filter_by_region(responses_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with region equality filter."""
tools = [
{
@@ -147,7 +139,7 @@ def test_response_file_search_filter_by_region(compat_client, text_model_id, vec
}
]
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="What are the updates from the US region?",
tools=tools,
@@ -168,7 +160,7 @@ def test_response_file_search_filter_by_region(compat_client, text_model_id, vec
assert "asia" not in result.text.lower()
-def test_response_file_search_filter_by_category(compat_client, text_model_id, vector_store_with_filtered_files):
+def test_response_file_search_filter_by_category(responses_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with category equality filter."""
tools = [
{
@@ -178,7 +170,7 @@ def test_response_file_search_filter_by_category(compat_client, text_model_id, v
}
]
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="Show me all marketing reports",
tools=tools,
@@ -198,7 +190,7 @@ def test_response_file_search_filter_by_category(compat_client, text_model_id, v
assert "revenue figures" not in result.text.lower()
-def test_response_file_search_filter_by_date_range(compat_client, text_model_id, vector_store_with_filtered_files):
+def test_response_file_search_filter_by_date_range(responses_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with date range filter using compound AND."""
tools = [
{
@@ -222,7 +214,7 @@ def test_response_file_search_filter_by_date_range(compat_client, text_model_id,
}
]
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="What happened in Q1 2023?",
tools=tools,
@@ -241,7 +233,7 @@ def test_response_file_search_filter_by_date_range(compat_client, text_model_id,
assert "q3" not in result.text.lower()
-def test_response_file_search_filter_compound_and(compat_client, text_model_id, vector_store_with_filtered_files):
+def test_response_file_search_filter_compound_and(responses_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with compound AND filter (region AND category)."""
tools = [
{
@@ -257,7 +249,7 @@ def test_response_file_search_filter_compound_and(compat_client, text_model_id,
}
]
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="What are the engineering updates from the US?",
tools=tools,
@@ -277,7 +269,7 @@ def test_response_file_search_filter_compound_and(compat_client, text_model_id,
assert "promotional" not in result.text.lower() and "revenue" not in result.text.lower()
-def test_response_file_search_filter_compound_or(compat_client, text_model_id, vector_store_with_filtered_files):
+def test_response_file_search_filter_compound_or(responses_client, text_model_id, vector_store_with_filtered_files):
"""Test file search with compound OR filter (marketing OR sales)."""
tools = [
{
@@ -293,7 +285,7 @@ def test_response_file_search_filter_compound_or(compat_client, text_model_id, v
}
]
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="Show me marketing and sales documents",
tools=tools,
@@ -320,7 +312,7 @@ def test_response_file_search_filter_compound_or(compat_client, text_model_id, v
assert categories_found.issubset({"marketing", "sales"}), f"Found unexpected categories: {categories_found}"
-def test_response_file_search_streaming_events(compat_client, text_model_id, vector_store_with_filtered_files):
+def test_response_file_search_streaming_events(responses_client, text_model_id, vector_store_with_filtered_files):
"""Test that file search emits proper streaming events (in_progress, searching, completed)."""
tools = [
{
@@ -329,7 +321,7 @@ def test_response_file_search_streaming_events(compat_client, text_model_id, vec
}
]
- stream = compat_client.responses.create(
+ stream = responses_client.responses.create(
model=text_model_id,
input="What are the marketing updates?",
tools=tools,
diff --git a/tests/integration/responses/test_mcp_authentication.py b/tests/integration/responses/test_mcp_authentication.py
new file mode 100644
index 000000000..5c990ff6a
--- /dev/null
+++ b/tests/integration/responses/test_mcp_authentication.py
@@ -0,0 +1,105 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+import pytest
+
+from tests.common.mcp import make_mcp_server
+
+from .helpers import setup_mcp_tools
+
+# MCP authentication tests with recordings
+# Tests for bearer token authorization support in MCP tool configurations
+
+
+def test_mcp_authorization_bearer(responses_client, text_model_id):
+ """Test that bearer authorization is correctly applied to MCP requests."""
+ test_token = "test-bearer-token-789"
+ with make_mcp_server(required_auth_token=test_token) as mcp_server_info:
+ tools = setup_mcp_tools(
+ [
+ {
+ "type": "mcp",
+ "server_label": "auth-mcp",
+ "server_url": "",
+ "authorization": test_token, # Just the token, not "Bearer "
+ }
+ ],
+ mcp_server_info,
+ )
+
+ # Create response - authorization should be applied
+ response = responses_client.responses.create(
+ model=text_model_id,
+ input="What is the boiling point of myawesomeliquid?",
+ tools=tools,
+ stream=False,
+ )
+
+ # Verify list_tools succeeded (requires auth)
+ assert len(response.output) >= 3
+ assert response.output[0].type == "mcp_list_tools"
+ assert len(response.output[0].tools) == 2
+
+ # Verify tool invocation succeeded (requires auth)
+ assert response.output[1].type == "mcp_call"
+ assert response.output[1].error is None
+
+
+def test_mcp_authorization_error_when_header_provided(responses_client, text_model_id):
+ """Test that providing Authorization in headers raises a security error."""
+ test_token = "test-token-123"
+ with make_mcp_server(required_auth_token=test_token) as mcp_server_info:
+ tools = setup_mcp_tools(
+ [
+ {
+ "type": "mcp",
+ "server_label": "header-auth-mcp",
+ "server_url": "",
+ "headers": {"Authorization": f"Bearer {test_token}"}, # Security risk - should be rejected
+ }
+ ],
+ mcp_server_info,
+ )
+
+ # Create response - should raise BadRequestError for security reasons
+ with pytest.raises((ValueError, Exception), match="Authorization header cannot be passed via 'headers'"):
+ responses_client.responses.create(
+ model=text_model_id,
+ input="What is the boiling point of myawesomeliquid?",
+ tools=tools,
+ stream=False,
+ )
+
+
+def test_mcp_authorization_backward_compatibility(responses_client, text_model_id):
+ """Test that MCP tools work without authorization (backward compatibility)."""
+ # No authorization required
+ with make_mcp_server(required_auth_token=None) as mcp_server_info:
+ tools = setup_mcp_tools(
+ [
+ {
+ "type": "mcp",
+ "server_label": "noauth-mcp",
+ "server_url": "",
+ }
+ ],
+ mcp_server_info,
+ )
+
+ # Create response without authorization
+ response = responses_client.responses.create(
+ model=text_model_id,
+ input="What is the boiling point of myawesomeliquid?",
+ tools=tools,
+ stream=False,
+ )
+
+ # Verify operations succeeded without auth
+ assert len(response.output) >= 3
+ assert response.output[0].type == "mcp_list_tools"
+ assert response.output[1].type == "mcp_call"
+ assert response.output[1].error is None
diff --git a/tests/integration/responses/test_tool_responses.py b/tests/integration/responses/test_tool_responses.py
index 9bf58c6ff..49bcd050b 100644
--- a/tests/integration/responses/test_tool_responses.py
+++ b/tests/integration/responses/test_tool_responses.py
@@ -9,6 +9,7 @@ import logging # allow-direct-logging
import os
import httpx
+import llama_stack_client
import openai
import pytest
@@ -29,8 +30,8 @@ from .streaming_assertions import StreamingValidator
@pytest.mark.parametrize("case", web_search_test_cases)
-def test_response_non_streaming_web_search(compat_client, text_model_id, case):
- response = compat_client.responses.create(
+def test_response_non_streaming_web_search(responses_client, text_model_id, case):
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=case.tools,
@@ -48,12 +49,9 @@ def test_response_non_streaming_web_search(compat_client, text_model_id, case):
@pytest.mark.parametrize("case", file_search_test_cases)
def test_response_non_streaming_file_search(
- compat_client, text_model_id, embedding_model_id, embedding_dimension, tmp_path, case
+ responses_client, text_model_id, embedding_model_id, embedding_dimension, tmp_path, case
):
- if isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("Responses API file search is not yet supported in library client.")
-
- vector_store = new_vector_store(compat_client, "test_vector_store", embedding_model_id, embedding_dimension)
+ vector_store = new_vector_store(responses_client, "test_vector_store", embedding_model_id, embedding_dimension)
if case.file_content:
file_name = "test_response_non_streaming_file_search.txt"
@@ -65,16 +63,16 @@ def test_response_non_streaming_file_search(
else:
raise ValueError("No file content or path provided for case")
- file_response = upload_file(compat_client, file_name, file_path)
+ file_response = upload_file(responses_client, file_name, file_path)
# Attach our file to the vector store
- compat_client.vector_stores.files.create(
+ responses_client.vector_stores.files.create(
vector_store_id=vector_store.id,
file_id=file_response.id,
)
# Wait for the file to be attached
- wait_for_file_attachment(compat_client, vector_store.id, file_response.id)
+ wait_for_file_attachment(responses_client, vector_store.id, file_response.id)
# Update our tools with the right vector store id
tools = case.tools
@@ -83,7 +81,7 @@ def test_response_non_streaming_file_search(
tool["vector_store_ids"] = [vector_store.id]
# Create the response request, which should query our vector store
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
@@ -105,15 +103,12 @@ def test_response_non_streaming_file_search(
def test_response_non_streaming_file_search_empty_vector_store(
- compat_client, text_model_id, embedding_model_id, embedding_dimension
+ responses_client, text_model_id, embedding_model_id, embedding_dimension
):
- if isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("Responses API file search is not yet supported in library client.")
-
- vector_store = new_vector_store(compat_client, "test_vector_store", embedding_model_id, embedding_dimension)
+ vector_store = new_vector_store(responses_client, "test_vector_store", embedding_model_id, embedding_dimension)
# Create the response request, which should query our vector store
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="How many experts does the Llama 4 Maverick model have?",
tools=[{"type": "file_search", "vector_store_ids": [vector_store.id]}],
@@ -133,13 +128,10 @@ def test_response_non_streaming_file_search_empty_vector_store(
def test_response_sequential_file_search(
- compat_client, text_model_id, embedding_model_id, embedding_dimension, tmp_path
+ responses_client, text_model_id, embedding_model_id, embedding_dimension, tmp_path
):
"""Test file search with sequential responses using previous_response_id."""
- if isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("Responses API file search is not yet supported in library client.")
-
- vector_store = new_vector_store(compat_client, "test_vector_store", embedding_model_id, embedding_dimension)
+ vector_store = new_vector_store(responses_client, "test_vector_store", embedding_model_id, embedding_dimension)
# Create a test file with content
file_content = "The Llama 4 Maverick model has 128 experts in its mixture of experts architecture."
@@ -147,21 +139,21 @@ def test_response_sequential_file_search(
file_path = tmp_path / file_name
file_path.write_text(file_content)
- file_response = upload_file(compat_client, file_name, file_path)
+ file_response = upload_file(responses_client, file_name, file_path)
# Attach the file to the vector store
- compat_client.vector_stores.files.create(
+ responses_client.vector_stores.files.create(
vector_store_id=vector_store.id,
file_id=file_response.id,
)
# Wait for the file to be attached
- wait_for_file_attachment(compat_client, vector_store.id, file_response.id)
+ wait_for_file_attachment(responses_client, vector_store.id, file_response.id)
tools = [{"type": "file_search", "vector_store_ids": [vector_store.id]}]
# First response request with file search
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input="How many experts does the Llama 4 Maverick model have?",
tools=tools,
@@ -178,7 +170,7 @@ def test_response_sequential_file_search(
assert "128" in response.output_text or "experts" in response.output_text.lower()
# Second response request using previous_response_id
- response2 = compat_client.responses.create(
+ response2 = responses_client.responses.create(
model=text_model_id,
input="Can you tell me more about the architecture?",
tools=tools,
@@ -199,14 +191,11 @@ def test_response_sequential_file_search(
@pytest.mark.parametrize("case", mcp_tool_test_cases)
-def test_response_non_streaming_mcp_tool(compat_client, text_model_id, case, caplog):
- if not isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("in-process MCP server is only supported in library client")
-
+def test_response_non_streaming_mcp_tool(responses_client, text_model_id, case, caplog):
with make_mcp_server() as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
@@ -243,15 +232,15 @@ def test_response_non_streaming_mcp_tool(compat_client, text_model_id, case, cap
exc_type = (
AuthenticationRequiredError
- if isinstance(compat_client, LlamaStackAsLibraryClient)
- else (httpx.HTTPStatusError, openai.AuthenticationError)
+ if isinstance(responses_client, LlamaStackAsLibraryClient)
+ else (httpx.HTTPStatusError, openai.AuthenticationError, llama_stack_client.AuthenticationError)
)
# Suppress expected auth error logs only for the failing auth attempt
with caplog.at_level(
logging.CRITICAL, logger="llama_stack.providers.inline.agents.meta_reference.responses.streaming"
):
with pytest.raises(exc_type):
- compat_client.responses.create(
+ responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
@@ -260,9 +249,9 @@ def test_response_non_streaming_mcp_tool(compat_client, text_model_id, case, cap
for tool in tools:
if tool["type"] == "mcp":
- tool["headers"] = {"Authorization": "Bearer test-token"}
+ tool["authorization"] = "test-token"
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
@@ -272,14 +261,11 @@ def test_response_non_streaming_mcp_tool(compat_client, text_model_id, case, cap
@pytest.mark.parametrize("case", mcp_tool_test_cases)
-def test_response_sequential_mcp_tool(compat_client, text_model_id, case):
- if not isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("in-process MCP server is only supported in library client")
-
+def test_response_sequential_mcp_tool(responses_client, text_model_id, case):
with make_mcp_server() as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
@@ -311,7 +297,7 @@ def test_response_sequential_mcp_tool(compat_client, text_model_id, case):
text_content = message.content[0].text
assert "boiling point" in text_content.lower()
- response2 = compat_client.responses.create(
+ response2 = responses_client.responses.create(
model=text_model_id, input=case.input, tools=tools, stream=False, previous_response_id=response.id
)
@@ -323,16 +309,13 @@ def test_response_sequential_mcp_tool(compat_client, text_model_id, case):
@pytest.mark.parametrize("case", mcp_tool_test_cases)
@pytest.mark.parametrize("approve", [True, False])
-def test_response_mcp_tool_approval(compat_client, text_model_id, case, approve):
- if not isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("in-process MCP server is only supported in library client")
-
+def test_response_mcp_tool_approval(responses_client, text_model_id, case, approve):
with make_mcp_server() as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
for tool in tools:
tool["require_approval"] = "always"
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=tools,
@@ -352,13 +335,13 @@ def test_response_mcp_tool_approval(compat_client, text_model_id, case, approve)
approval_request = response.output[1]
assert approval_request.type == "mcp_approval_request"
assert approval_request.name == "get_boiling_point"
- assert json.loads(approval_request.arguments) == {
- "liquid_name": "myawesomeliquid",
- "celsius": True,
- }
+ args = json.loads(approval_request.arguments)
+ assert args["liquid_name"] == "myawesomeliquid"
+ # celsius has a default value of True, so it may be omitted or explicitly set
+ assert args.get("celsius", True) is True
# send approval response
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
previous_response_id=response.id,
model=text_model_id,
input=[{"type": "mcp_approval_response", "approval_request_id": approval_request.id, "approve": approve}],
@@ -398,8 +381,8 @@ def test_response_mcp_tool_approval(compat_client, text_model_id, case, approve)
@pytest.mark.parametrize("case", custom_tool_test_cases)
-def test_response_non_streaming_custom_tool(compat_client, text_model_id, case):
- response = compat_client.responses.create(
+def test_response_non_streaming_custom_tool(responses_client, text_model_id, case):
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=case.tools,
@@ -412,8 +395,8 @@ def test_response_non_streaming_custom_tool(compat_client, text_model_id, case):
@pytest.mark.parametrize("case", custom_tool_test_cases)
-def test_response_function_call_ordering_1(compat_client, text_model_id, case):
- response = compat_client.responses.create(
+def test_response_function_call_ordering_1(responses_client, text_model_id, case):
+ response = responses_client.responses.create(
model=text_model_id,
input=case.input,
tools=case.tools,
@@ -437,13 +420,13 @@ def test_response_function_call_ordering_1(compat_client, text_model_id, case):
"call_id": response.output[0].call_id,
}
)
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id, input=inputs, tools=case.tools, stream=False, previous_response_id=response.id
)
assert len(response.output) == 1
-def test_response_function_call_ordering_2(compat_client, text_model_id):
+def test_response_function_call_ordering_2(responses_client, text_model_id):
tools = [
{
"type": "function",
@@ -468,7 +451,7 @@ def test_response_function_call_ordering_2(compat_client, text_model_id):
"content": "Is the weather better in San Francisco or Los Angeles?",
}
]
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=inputs,
tools=tools,
@@ -489,7 +472,7 @@ def test_response_function_call_ordering_2(compat_client, text_model_id):
"call_id": output.call_id,
}
)
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
model=text_model_id,
input=inputs,
tools=tools,
@@ -500,15 +483,12 @@ def test_response_function_call_ordering_2(compat_client, text_model_id):
@pytest.mark.parametrize("case", multi_turn_tool_execution_test_cases)
-def test_response_non_streaming_multi_turn_tool_execution(compat_client, text_model_id, case):
+def test_response_non_streaming_multi_turn_tool_execution(responses_client, text_model_id, case):
"""Test multi-turn tool execution where multiple MCP tool calls are performed in sequence."""
- if not isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("in-process MCP server is only supported in library client")
-
with make_mcp_server(tools=dependency_tools()) as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
- response = compat_client.responses.create(
+ response = responses_client.responses.create(
input=case.input,
model=text_model_id,
tools=tools,
@@ -550,15 +530,12 @@ def test_response_non_streaming_multi_turn_tool_execution(compat_client, text_mo
@pytest.mark.parametrize("case", multi_turn_tool_execution_streaming_test_cases)
-def test_response_streaming_multi_turn_tool_execution(compat_client, text_model_id, case):
+def test_response_streaming_multi_turn_tool_execution(responses_client, text_model_id, case):
"""Test streaming multi-turn tool execution where multiple MCP tool calls are performed in sequence."""
- if not isinstance(compat_client, LlamaStackAsLibraryClient):
- pytest.skip("in-process MCP server is only supported in library client")
-
with make_mcp_server(tools=dependency_tools()) as mcp_server_info:
tools = setup_mcp_tools(case.tools, mcp_server_info)
- stream = compat_client.responses.create(
+ stream = responses_client.responses.create(
input=case.input,
model=text_model_id,
tools=tools,
@@ -623,3 +600,155 @@ def test_response_streaming_multi_turn_tool_execution(compat_client, text_model_
assert expected_output.lower() in final_response.output_text.lower(), (
f"Expected '{expected_output}' to appear in response: {final_response.output_text}"
)
+
+
+def test_max_tool_calls_with_function_tools(responses_client, text_model_id):
+ """Test handling of max_tool_calls with function tools in responses."""
+
+ max_tool_calls = 1
+ tools = [
+ {
+ "type": "function",
+ "name": "get_weather",
+ "description": "Get weather information for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')",
+ },
+ },
+ },
+ },
+ {
+ "type": "function",
+ "name": "get_time",
+ "description": "Get current time for a specified location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city name (e.g., 'New York', 'London')",
+ },
+ },
+ },
+ },
+ ]
+
+ response = responses_client.responses.create(
+ model=text_model_id,
+ input="Can you tell me the weather in Paris and the current time?",
+ tools=tools,
+ stream=False,
+ max_tool_calls=max_tool_calls,
+ )
+
+ # Verify we got two function calls and that the max_tool_calls does not affect function tools
+ assert len(response.output) == 2
+ assert response.output[0].type == "function_call"
+ assert response.output[0].name == "get_weather"
+ assert response.output[0].status == "completed"
+ assert response.output[1].type == "function_call"
+ assert response.output[1].name == "get_time"
+ assert response.output[1].status == "completed"
+
+ # Verify we have a valid max_tool_calls field
+ assert response.max_tool_calls == max_tool_calls
+
+
+def test_max_tool_calls_invalid(responses_client, text_model_id):
+ """Test handling of invalid max_tool_calls in responses."""
+
+ input = "Search for today's top technology news."
+ invalid_max_tool_calls = 0
+ tools = [
+ {"type": "web_search"},
+ ]
+
+    # Create a response with an invalid max_tool_calls value (i.e., 0)
+ # Handle ValueError from LLS and BadRequestError from OpenAI client
+ with pytest.raises((ValueError, llama_stack_client.BadRequestError, openai.BadRequestError)) as excinfo:
+ responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ max_tool_calls=invalid_max_tool_calls,
+ )
+
+ error_message = str(excinfo.value)
+ assert f"Invalid max_tool_calls={invalid_max_tool_calls}; should be >= 1" in error_message, (
+ f"Expected error message about invalid max_tool_calls, got: {error_message}"
+ )
+
+
+def test_max_tool_calls_with_mcp_tools(responses_client, text_model_id):
+ """Test handling of max_tool_calls with mcp tools in responses."""
+
+ with make_mcp_server(tools=dependency_tools()) as mcp_server_info:
+ input = "Get the experiment ID for 'boiling_point' and get the user ID for 'charlie'"
+ max_tool_calls = [1, 5]
+ tools = [
+ {"type": "mcp", "server_label": "localmcp", "server_url": mcp_server_info["server_url"]},
+ ]
+
+ # First create a response that triggers mcp tools without max_tool_calls
+ response = responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ )
+
+        # Verify we got mcp_list_tools plus two mcp tool calls, followed by a message (4 outputs)
+ assert len(response.output) == 4
+ mcp_list_tools = [output for output in response.output if output.type == "mcp_list_tools"]
+ mcp_calls = [output for output in response.output if output.type == "mcp_call"]
+ message_outputs = [output for output in response.output if output.type == "message"]
+ assert len(mcp_list_tools) == 1
+ assert len(mcp_calls) == 2, f"Expected two mcp calls, got {len(mcp_calls)}"
+ assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}"
+
+ # Next create a response that triggers mcp tools with max_tool_calls set to 1
+ response_2 = responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ max_tool_calls=max_tool_calls[0],
+ )
+
+        # Verify we got mcp_list_tools plus one mcp tool call, followed by a message (3 outputs)
+ assert len(response_2.output) == 3
+ mcp_list_tools = [output for output in response_2.output if output.type == "mcp_list_tools"]
+ mcp_calls = [output for output in response_2.output if output.type == "mcp_call"]
+ message_outputs = [output for output in response_2.output if output.type == "message"]
+ assert len(mcp_list_tools) == 1
+ assert len(mcp_calls) == 1, f"Expected one mcp call, got {len(mcp_calls)}"
+ assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}"
+
+ # Verify we have a valid max_tool_calls field
+ assert response_2.max_tool_calls == max_tool_calls[0]
+
+ # Finally create a response that triggers mcp tools with max_tool_calls set to 5
+ response_3 = responses_client.responses.create(
+ model=text_model_id,
+ input=input,
+ tools=tools,
+ stream=False,
+ max_tool_calls=max_tool_calls[1],
+ )
+
+        # Verify we got mcp_list_tools plus two mcp tool calls, followed by a message (4 outputs)
+ assert len(response_3.output) == 4
+ mcp_list_tools = [output for output in response_3.output if output.type == "mcp_list_tools"]
+ mcp_calls = [output for output in response_3.output if output.type == "mcp_call"]
+ message_outputs = [output for output in response_3.output if output.type == "message"]
+ assert len(mcp_list_tools) == 1
+ assert len(mcp_calls) == 2, f"Expected two mcp calls, got {len(mcp_calls)}"
+ assert len(message_outputs) == 1, f"Expected one message output, got {len(message_outputs)}"
+
+ # Verify we have a valid max_tool_calls field
+ assert response_3.max_tool_calls == max_tool_calls[1]
diff --git a/tests/integration/safety/test_llama_guard.py b/tests/integration/safety/test_llama_guard.py
index 5a73bb044..a554752cd 100644
--- a/tests/integration/safety/test_llama_guard.py
+++ b/tests/integration/safety/test_llama_guard.py
@@ -13,8 +13,8 @@ from collections.abc import Generator
import pytest
-from llama_stack.apis.safety import ViolationLevel
from llama_stack.models.llama.sku_types import CoreModelId
+from llama_stack_api import ViolationLevel
# Llama Guard models available for text and vision shields
LLAMA_GUARD_TEXT_MODELS = [CoreModelId.llama_guard_4_12b.value]
diff --git a/tests/integration/safety/test_safety.py b/tests/integration/safety/test_safety.py
index 6337abc9c..857ff2f81 100644
--- a/tests/integration/safety/test_safety.py
+++ b/tests/integration/safety/test_safety.py
@@ -8,7 +8,7 @@ import mimetypes
import pytest
-from llama_stack.apis.safety import ViolationLevel
+from llama_stack_api import ViolationLevel
CODE_SCANNER_ENABLED_PROVIDERS = {"ollama", "together", "fireworks"}
diff --git a/tests/integration/safety/test_vision_safety.py b/tests/integration/safety/test_vision_safety.py
index 7b3779e9e..dc7b7e1ad 100644
--- a/tests/integration/safety/test_vision_safety.py
+++ b/tests/integration/safety/test_vision_safety.py
@@ -10,7 +10,7 @@ import os
import pytest
-from llama_stack.apis.safety import ViolationLevel
+from llama_stack_api import ViolationLevel
VISION_SHIELD_ENABLED_PROVIDERS = {"together"}
diff --git a/tests/integration/suites.py b/tests/integration/suites.py
index 0cec66afe..10c872705 100644
--- a/tests/integration/suites.py
+++ b/tests/integration/suites.py
@@ -50,7 +50,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
name="ollama",
description="Local Ollama provider with text + safety models",
env={
- "OLLAMA_URL": "http://0.0.0.0:11434",
+ "OLLAMA_URL": "http://0.0.0.0:11434/v1",
"SAFETY_MODEL": "ollama/llama-guard3:1b",
},
defaults={
@@ -64,13 +64,33 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
name="ollama",
description="Local Ollama provider with a vision model",
env={
- "OLLAMA_URL": "http://0.0.0.0:11434",
+ "OLLAMA_URL": "http://0.0.0.0:11434/v1",
},
defaults={
"vision_model": "ollama/llama3.2-vision:11b",
"embedding_model": "ollama/nomic-embed-text:v1.5",
},
),
+ "ollama-postgres": Setup(
+ name="ollama-postgres",
+ description="Server-mode tests with Postgres-backed persistence",
+ env={
+ "OLLAMA_URL": "http://0.0.0.0:11434/v1",
+ "SAFETY_MODEL": "ollama/llama-guard3:1b",
+ "POSTGRES_HOST": "127.0.0.1",
+ "POSTGRES_PORT": "5432",
+ "POSTGRES_DB": "llamastack",
+ "POSTGRES_USER": "llamastack",
+ "POSTGRES_PASSWORD": "llamastack",
+ "LLAMA_STACK_LOGGING": "openai_responses=info",
+ },
+ defaults={
+ "text_model": "ollama/llama3.2:3b-instruct-fp16",
+ "embedding_model": "sentence-transformers/nomic-embed-text-v1.5",
+ "safety_model": "ollama/llama-guard3:1b",
+ "safety_shield": "llama-guard",
+ },
+ ),
"vllm": Setup(
name="vllm",
description="vLLM provider with a text model",
diff --git a/tests/integration/tool_runtime/test_mcp.py b/tests/integration/tool_runtime/test_mcp.py
index 3a8fde37f..074a92afb 100644
--- a/tests/integration/tool_runtime/test_mcp.py
+++ b/tests/integration/tool_runtime/test_mcp.py
@@ -4,14 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-import json
-
import pytest
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.turn_events import StepCompleted, StepProgress, ToolCallIssuedDelta
-from llama_stack.core.library_client import LlamaStackAsLibraryClient
-
AUTH_TOKEN = "test-token"
from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server
@@ -24,9 +20,6 @@ def mcp_server():
def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
- if not isinstance(llama_stack_client, LlamaStackAsLibraryClient):
- pytest.skip("The local MCP server only reliably reachable from library client.")
-
test_toolgroup_id = MCP_TOOLGROUP_ID
uri = mcp_server["server_url"]
@@ -42,31 +35,20 @@ def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
mcp_endpoint=dict(uri=uri),
)
- provider_data = {
- "mcp_headers": {
- uri: {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
- },
- }
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- with pytest.raises(Exception, match="Unauthorized"):
- llama_stack_client.tools.list(toolgroup_id=test_toolgroup_id)
-
- tools_list = llama_stack_client.tools.list(
- toolgroup_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ # Use the dedicated authorization parameter (no more provider_data headers)
+ # This tests direct tool_runtime.invoke_tool API calls
+ tools_list = llama_stack_client.tool_runtime.list_tools(
+ tool_group_id=test_toolgroup_id,
+ authorization=AUTH_TOKEN, # Use dedicated authorization parameter
)
assert len(tools_list) == 2
assert {t.name for t in tools_list} == {"greet_everyone", "get_boiling_point"}
+ # Invoke tool with authorization parameter
response = llama_stack_client.tool_runtime.invoke_tool(
tool_name="greet_everyone",
kwargs=dict(url="https://www.google.com"),
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN, # Use dedicated authorization parameter
)
content = response.content
assert len(content) == 1
@@ -81,9 +63,7 @@ def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
"server_label": test_toolgroup_id,
"require_approval": "never",
"allowed_tools": [tool.name for tool in tools_list],
- "headers": {
- "Authorization": f"Bearer {AUTH_TOKEN}",
- },
+ "authorization": AUTH_TOKEN,
}
]
agent = Agent(
@@ -109,7 +89,6 @@ def test_mcp_invocation(llama_stack_client, text_model_id, mcp_server):
}
],
stream=True,
- extra_headers=auth_headers,
)
)
events = [chunk.event for chunk in chunks]
diff --git a/tests/integration/tool_runtime/test_mcp_json_schema.py b/tests/integration/tool_runtime/test_mcp_json_schema.py
index def0b27b8..6be71caaf 100644
--- a/tests/integration/tool_runtime/test_mcp_json_schema.py
+++ b/tests/integration/tool_runtime/test_mcp_json_schema.py
@@ -4,13 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-"""
-Integration tests for MCP tools with complex JSON Schema support.
+"""Integration tests for MCP tools with complex JSON Schema support.
Tests $ref, $defs, and other JSON Schema features through MCP integration.
"""
-import json
-
import pytest
from llama_stack.core.library_client import LlamaStackAsLibraryClient
@@ -123,15 +120,11 @@ class TestMCPSchemaPreservation:
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
# List runtime tools
response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
tools = response
@@ -166,15 +159,12 @@ class TestMCPSchemaPreservation:
provider_id="model-context-protocol",
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
+ # Use the dedicated authorization parameter
# List tools
response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Find book_flight tool (which should have $ref/$defs)
@@ -216,14 +206,10 @@ class TestMCPSchemaPreservation:
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
+ # Use the dedicated authorization parameter
response = llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Find get_weather tool
@@ -263,15 +249,10 @@ class TestMCPToolInvocation:
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- # List tools to populate the tool index
+ # Use the dedicated authorization parameter
llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Invoke tool with complex nested data
@@ -283,7 +264,7 @@ class TestMCPToolInvocation:
"shipping": {"address": {"street": "123 Main St", "city": "San Francisco", "zipcode": "94102"}},
}
},
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Should succeed without schema validation errors
@@ -309,22 +290,17 @@ class TestMCPToolInvocation:
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- # List tools to populate the tool index
+ # Use the dedicated authorization parameter
llama_stack_client.tool_runtime.list_tools(
tool_group_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
# Test with email format
result_email = llama_stack_client.tool_runtime.invoke_tool(
tool_name="flexible_contact",
kwargs={"contact_info": "user@example.com"},
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
assert result_email.error_message is None
@@ -333,7 +309,7 @@ class TestMCPToolInvocation:
result_phone = llama_stack_client.tool_runtime.invoke_tool(
tool_name="flexible_contact",
kwargs={"contact_info": "+15551234567"},
- extra_headers=auth_headers,
+ authorization=AUTH_TOKEN,
)
assert result_phone.error_message is None
@@ -365,14 +341,10 @@ class TestAgentWithMCPTools:
mcp_endpoint=dict(uri=uri),
)
- provider_data = {"mcp_headers": {uri: {"Authorization": f"Bearer {AUTH_TOKEN}"}}}
- auth_headers = {
- "X-LlamaStack-Provider-Data": json.dumps(provider_data),
- }
-
- tools_list = llama_stack_client.tools.list(
- toolgroup_id=test_toolgroup_id,
- extra_headers=auth_headers,
+ # Use the dedicated authorization parameter
+ tools_list = llama_stack_client.tool_runtime.list_tools(
+ tool_group_id=test_toolgroup_id,
+ authorization=AUTH_TOKEN,
)
tool_defs = [
{
@@ -381,6 +353,7 @@ class TestAgentWithMCPTools:
"server_label": test_toolgroup_id,
"require_approval": "never",
"allowed_tools": [tool.name for tool in tools_list],
+ "authorization": AUTH_TOKEN,
}
]
@@ -389,7 +362,6 @@ class TestAgentWithMCPTools:
model=text_model_id,
instructions="You are a helpful assistant that can process orders and book flights.",
tools=tool_defs,
- extra_headers=auth_headers,
)
session_id = agent.create_session("test-session-complex")
@@ -411,7 +383,6 @@ class TestAgentWithMCPTools:
}
],
stream=True,
- extra_headers=auth_headers,
)
)
diff --git a/tests/integration/tool_runtime/test_registration.py b/tests/integration/tool_runtime/test_registration.py
index 4d532ed87..036a5f018 100644
--- a/tests/integration/tool_runtime/test_registration.py
+++ b/tests/integration/tool_runtime/test_registration.py
@@ -8,8 +8,8 @@ import re
import pytest
-from llama_stack.apis.common.errors import ToolGroupNotFoundError
from llama_stack.core.library_client import LlamaStackAsLibraryClient
+from llama_stack_api import ToolGroupNotFoundError
from tests.common.mcp import MCP_TOOLGROUP_ID, make_mcp_server
diff --git a/tests/integration/vector_io/test_openai_vector_stores.py b/tests/integration/vector_io/test_openai_vector_stores.py
index 97ce4abe8..102f3f00c 100644
--- a/tests/integration/vector_io/test_openai_vector_stores.py
+++ b/tests/integration/vector_io/test_openai_vector_stores.py
@@ -11,9 +11,9 @@ import pytest
from llama_stack_client import BadRequestError
from openai import BadRequestError as OpenAIBadRequestError
-from llama_stack.apis.vector_io import Chunk
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from llama_stack.log import get_logger
+from llama_stack_api import Chunk, ExpiresAfter
from ..conftest import vector_provider_wrapper
@@ -645,7 +645,7 @@ def test_openai_vector_store_attach_file(
):
"""Test OpenAI vector store attach file."""
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
compat_client = compat_client_with_empty_stores
@@ -709,7 +709,7 @@ def test_openai_vector_store_attach_files_on_creation(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
# Create some files and attach them to the vector store
valid_file_ids = []
@@ -774,7 +774,7 @@ def test_openai_vector_store_list_files(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@@ -866,7 +866,7 @@ def test_openai_vector_store_retrieve_file_contents(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@@ -907,16 +907,16 @@ def test_openai_vector_store_retrieve_file_contents(
)
assert file_contents is not None
- assert len(file_contents.content) == 1
- content = file_contents.content[0]
+ assert file_contents.object == "vector_store.file_content.page"
+ assert len(file_contents.data) == 1
+ content = file_contents.data[0]
# llama-stack-client returns a model, openai-python is a badboy and returns a dict
if not isinstance(content, dict):
content = content.model_dump()
assert content["type"] == "text"
assert content["text"] == test_content.decode("utf-8")
- assert file_contents.filename == file_name
- assert file_contents.attributes == attributes
+ assert file_contents.has_more is False
@vector_provider_wrapper
@@ -927,7 +927,7 @@ def test_openai_vector_store_delete_file(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@@ -993,7 +993,7 @@ def test_openai_vector_store_delete_file_removes_from_vector_store(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@@ -1045,7 +1045,7 @@ def test_openai_vector_store_update_file(
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
compat_client = compat_client_with_empty_stores
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
# Create a vector store
vector_store = compat_client.vector_stores.create(
@@ -1102,7 +1102,7 @@ def test_create_vector_store_files_duplicate_vector_store_name(
This test confirms that client.vector_stores.create() creates a unique ID
"""
skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
compat_client = compat_client_with_empty_stores
@@ -1483,14 +1483,12 @@ def test_openai_vector_store_file_batch_retrieve_contents(
)
assert file_contents is not None
- assert file_contents.filename == file_data[i][0]
- assert len(file_contents.content) > 0
+ assert file_contents.object == "vector_store.file_content.page"
+ assert len(file_contents.data) > 0
# Verify the content matches what we uploaded
content_text = (
- file_contents.content[0].text
- if hasattr(file_contents.content[0], "text")
- else file_contents.content[0]["text"]
+ file_contents.data[0].text if hasattr(file_contents.data[0], "text") else file_contents.data[0]["text"]
)
assert file_data[i][1].decode("utf-8") in content_text
@@ -1606,3 +1604,97 @@ def test_openai_vector_store_embedding_config_from_metadata(
assert "metadata_config_store" in store_names
assert "consistent_config_store" in store_names
+
+
+@vector_provider_wrapper
+def test_openai_vector_store_file_contents_with_extra_query(
+ compat_client_with_empty_stores, client_with_models, embedding_model_id, embedding_dimension, vector_io_provider_id
+):
+ """Test that vector store file contents endpoint supports extra_query parameter."""
+ skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
+ compat_client = compat_client_with_empty_stores
+
+ # Create a vector store
+ vector_store = compat_client.vector_stores.create(
+ name="test_extra_query_store",
+ extra_body={
+ "embedding_model": embedding_model_id,
+ "provider_id": vector_io_provider_id,
+ },
+ )
+
+ # Create and attach a file
+ test_content = b"This is test content for extra_query validation."
+ with BytesIO(test_content) as file_buffer:
+ file_buffer.name = "test_extra_query.txt"
+ file = compat_client.files.create(
+ file=file_buffer,
+ purpose="assistants",
+ expires_after=ExpiresAfter(anchor="created_at", seconds=86400),
+ )
+
+ file_attach_response = compat_client.vector_stores.files.create(
+ vector_store_id=vector_store.id,
+ file_id=file.id,
+ extra_body={"embedding_model": embedding_model_id},
+ )
+ assert file_attach_response.status == "completed"
+
+ # Wait for processing
+ time.sleep(2)
+
+ # Test that extra_query parameter is accepted and processed
+ content_with_extra_query = compat_client.vector_stores.files.content(
+ vector_store_id=vector_store.id,
+ file_id=file.id,
+ extra_query={"include_embeddings": True, "include_metadata": True},
+ )
+
+ # Test without extra_query for comparison
+ content_without_extra_query = compat_client.vector_stores.files.content(
+ vector_store_id=vector_store.id,
+ file_id=file.id,
+ )
+
+ # Validate that both calls succeed
+ assert content_with_extra_query is not None
+ assert content_without_extra_query is not None
+ assert len(content_with_extra_query.data) > 0
+ assert len(content_without_extra_query.data) > 0
+
+ # Validate that extra_query parameter is processed correctly
+ # Both should have the embedding/metadata fields available (may be None based on flags)
+ first_chunk_with_flags = content_with_extra_query.data[0]
+ first_chunk_without_flags = content_without_extra_query.data[0]
+
+ # The key validation: extra_query fields are present in the response
+ # Handle both dict and object responses (different clients may return different formats)
+ def has_field(obj, field):
+ if isinstance(obj, dict):
+ return field in obj
+ else:
+ return hasattr(obj, field)
+
+ # Validate that all expected fields are present in both responses
+ expected_fields = ["embedding", "chunk_metadata", "metadata", "text"]
+ for field in expected_fields:
+ assert has_field(first_chunk_with_flags, field), f"Field '{field}' missing from response with extra_query"
+ assert has_field(first_chunk_without_flags, field), f"Field '{field}' missing from response without extra_query"
+
+ # Validate content is the same
+ def get_field(obj, field):
+ if isinstance(obj, dict):
+ return obj[field]
+ else:
+ return getattr(obj, field)
+
+ assert get_field(first_chunk_with_flags, "text") == test_content.decode("utf-8")
+ assert get_field(first_chunk_without_flags, "text") == test_content.decode("utf-8")
+
+ with_flags_embedding = get_field(first_chunk_with_flags, "embedding")
+ without_flags_embedding = get_field(first_chunk_without_flags, "embedding")
+
+ # Validate that embeddings are included when requested and excluded when not requested
+ assert with_flags_embedding is not None, "Embeddings should be included when include_embeddings=True"
+ assert len(with_flags_embedding) > 0, "Embedding should be a non-empty list"
+ assert without_flags_embedding is None, "Embeddings should not be included when include_embeddings=False"
diff --git a/tests/integration/vector_io/test_vector_io.py b/tests/integration/vector_io/test_vector_io.py
index 1b2099069..29dbd3e56 100644
--- a/tests/integration/vector_io/test_vector_io.py
+++ b/tests/integration/vector_io/test_vector_io.py
@@ -6,7 +6,7 @@
import pytest
-from llama_stack.apis.vector_io import Chunk
+from llama_stack_api import Chunk
from ..conftest import vector_provider_wrapper
diff --git a/tests/unit/conversations/test_api_models.py b/tests/unit/conversations/test_api_models.py
index 8416cba0b..f8576f076 100644
--- a/tests/unit/conversations/test_api_models.py
+++ b/tests/unit/conversations/test_api_models.py
@@ -5,11 +5,7 @@
# the root directory of this source tree.
-from llama_stack.apis.conversations.conversations import (
- Conversation,
- ConversationItem,
- ConversationItemList,
-)
+from llama_stack_api import Conversation, ConversationItem, ConversationItemList
def test_conversation_model_defaults():
diff --git a/tests/unit/conversations/test_conversations.py b/tests/unit/conversations/test_conversations.py
index 3f0175831..3f9df5fc0 100644
--- a/tests/unit/conversations/test_conversations.py
+++ b/tests/unit/conversations/test_conversations.py
@@ -12,10 +12,6 @@ from openai.types.conversations.conversation import Conversation as OpenAIConver
from openai.types.conversations.conversation_item import ConversationItem as OpenAIConversationItem
from pydantic import TypeAdapter
-from llama_stack.apis.agents.openai_responses import (
- OpenAIResponseInputMessageContentText,
- OpenAIResponseMessage,
-)
from llama_stack.core.conversations.conversations import (
ConversationServiceConfig,
ConversationServiceImpl,
@@ -27,7 +23,8 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import OpenAIResponseInputMessageContentText, OpenAIResponseMessage
@pytest.fixture
@@ -41,6 +38,9 @@ async def service():
},
stores=ServerStoresConfig(
conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
+ metadata=None,
+ inference=None,
+ prompts=None,
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
@@ -145,6 +145,9 @@ async def test_policy_configuration():
},
stores=ServerStoresConfig(
conversations=SqlStoreReference(backend="sql_test", table_name="openai_conversations"),
+ metadata=None,
+ inference=None,
+ prompts=None,
),
)
register_sqlstore_backends({"sql_test": storage.backends["sql_test"]})
diff --git a/tests/unit/core/routers/test_safety_router.py b/tests/unit/core/routers/test_safety_router.py
index bf195ff33..1b24a59a2 100644
--- a/tests/unit/core/routers/test_safety_router.py
+++ b/tests/unit/core/routers/test_safety_router.py
@@ -6,10 +6,9 @@
from unittest.mock import AsyncMock
-from llama_stack.apis.safety.safety import ModerationObject, ModerationObjectResults
-from llama_stack.apis.shields import ListShieldsResponse, Shield
from llama_stack.core.datatypes import SafetyConfig
from llama_stack.core.routers.safety import SafetyRouter
+from llama_stack_api import ListShieldsResponse, ModerationObject, ModerationObjectResults, Shield
async def test_run_moderation_uses_default_shield_when_model_missing():
diff --git a/tests/unit/core/routers/test_vector_io.py b/tests/unit/core/routers/test_vector_io.py
index dd3246cb3..a6df0694b 100644
--- a/tests/unit/core/routers/test_vector_io.py
+++ b/tests/unit/core/routers/test_vector_io.py
@@ -8,8 +8,13 @@ from unittest.mock import AsyncMock, Mock
import pytest
-from llama_stack.apis.vector_io import OpenAICreateVectorStoreRequestWithExtraBody
from llama_stack.core.routers.vector_io import VectorIORouter
+from llama_stack_api import (
+ ModelNotFoundError,
+ ModelType,
+ ModelTypeError,
+ OpenAICreateVectorStoreRequestWithExtraBody,
+)
async def test_single_provider_auto_selection():
@@ -21,6 +26,7 @@ async def test_single_provider_auto_selection():
Mock(identifier="all-MiniLM-L6-v2", model_type="embedding", metadata={"embedding_dimension": 384})
]
)
+ mock_routing_table.get_object_by_identifier = AsyncMock(return_value=Mock(model_type=ModelType.embedding))
mock_routing_table.register_vector_store = AsyncMock(
return_value=Mock(identifier="vs_123", provider_id="inline::faiss", provider_resource_id="vs_123")
)
@@ -48,6 +54,7 @@ async def test_create_vector_stores_multiple_providers_missing_provider_id_error
Mock(identifier="all-MiniLM-L6-v2", model_type="embedding", metadata={"embedding_dimension": 384})
]
)
+ mock_routing_table.get_object_by_identifier = AsyncMock(return_value=Mock(model_type=ModelType.embedding))
router = VectorIORouter(mock_routing_table)
request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
{"name": "test_store", "embedding_model": "all-MiniLM-L6-v2"}
@@ -55,3 +62,94 @@ async def test_create_vector_stores_multiple_providers_missing_provider_id_error
with pytest.raises(ValueError, match="Multiple vector_io providers available"):
await router.openai_create_vector_store(request)
+
+
+async def test_update_vector_store_provider_id_change_fails():
+ """Test that updating a vector store with a different provider_id fails with clear error."""
+ mock_routing_table = Mock()
+
+ # Mock an existing vector store with provider_id "faiss"
+ mock_existing_store = Mock()
+ mock_existing_store.provider_id = "inline::faiss"
+ mock_existing_store.identifier = "vs_123"
+
+ mock_routing_table.get_object_by_identifier = AsyncMock(return_value=mock_existing_store)
+ mock_routing_table.get_provider_impl = AsyncMock(
+ return_value=Mock(openai_update_vector_store=AsyncMock(return_value=Mock(id="vs_123")))
+ )
+
+ router = VectorIORouter(mock_routing_table)
+
+ # Try to update with different provider_id in metadata - this should fail
+ with pytest.raises(ValueError, match="provider_id cannot be changed after vector store creation"):
+ await router.openai_update_vector_store(
+ vector_store_id="vs_123",
+ name="updated_name",
+ metadata={"provider_id": "inline::sqlite"}, # Different provider_id
+ )
+
+ # Verify the existing store was looked up to check provider_id
+ mock_routing_table.get_object_by_identifier.assert_called_once_with("vector_store", "vs_123")
+
+ # Provider should not be called since validation failed
+ mock_routing_table.get_provider_impl.assert_not_called()
+
+
+async def test_update_vector_store_same_provider_id_succeeds():
+ """Test that updating a vector store with the same provider_id succeeds."""
+ mock_routing_table = Mock()
+
+ # Mock an existing vector store with provider_id "faiss"
+ mock_existing_store = Mock()
+ mock_existing_store.provider_id = "inline::faiss"
+ mock_existing_store.identifier = "vs_123"
+
+ mock_routing_table.get_object_by_identifier = AsyncMock(return_value=mock_existing_store)
+ mock_routing_table.get_provider_impl = AsyncMock(
+ return_value=Mock(openai_update_vector_store=AsyncMock(return_value=Mock(id="vs_123")))
+ )
+
+ router = VectorIORouter(mock_routing_table)
+
+ # Update with same provider_id should succeed
+ await router.openai_update_vector_store(
+ vector_store_id="vs_123",
+ name="updated_name",
+ metadata={"provider_id": "inline::faiss"}, # Same provider_id
+ )
+
+ # Verify the provider update method was called
+ mock_routing_table.get_provider_impl.assert_called_once_with("vs_123")
+ provider = await mock_routing_table.get_provider_impl("vs_123")
+ provider.openai_update_vector_store.assert_called_once_with(
+ vector_store_id="vs_123", name="updated_name", expires_after=None, metadata={"provider_id": "inline::faiss"}
+ )
+
+
+async def test_create_vector_store_with_unknown_embedding_model_raises_error():
+ """Test that creating a vector store with an unknown embedding model raises
+    ModelNotFoundError."""
+ mock_routing_table = Mock(impls_by_provider_id={"provider": "mock"})
+ mock_routing_table.get_object_by_identifier = AsyncMock(return_value=None)
+
+ router = VectorIORouter(mock_routing_table)
+ request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
+ {"embedding_model": "unknown-model", "embedding_dimension": 384}
+ )
+
+ with pytest.raises(ModelNotFoundError, match="Model 'unknown-model' not found"):
+ await router.openai_create_vector_store(request)
+
+
+async def test_create_vector_store_with_wrong_model_type_raises_error():
+ """Test that creating a vector store with a non-embedding model raises ModelTypeError."""
+ mock_routing_table = Mock(impls_by_provider_id={"provider": "mock"})
+ mock_routing_table.get_object_by_identifier = AsyncMock(return_value=Mock(model_type=ModelType.llm))
+
+ router = VectorIORouter(mock_routing_table)
+ request = OpenAICreateVectorStoreRequestWithExtraBody.model_validate(
+ {"embedding_model": "text-model", "embedding_dimension": 384}
+ )
+
+ with pytest.raises(ModelTypeError, match="Model 'text-model' is of type"):
+ await router.openai_create_vector_store(request)
diff --git a/tests/unit/core/test_stack_validation.py b/tests/unit/core/test_stack_validation.py
index d28803006..5f75bc522 100644
--- a/tests/unit/core/test_stack_validation.py
+++ b/tests/unit/core/test_stack_validation.py
@@ -10,11 +10,10 @@ from unittest.mock import AsyncMock
import pytest
-from llama_stack.apis.models import ListModelsResponse, Model, ModelType
-from llama_stack.apis.shields import ListShieldsResponse, Shield
-from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, StorageConfig, VectorStoresConfig
+from llama_stack.core.datatypes import QualifiedModel, SafetyConfig, StackRunConfig, VectorStoresConfig
from llama_stack.core.stack import validate_safety_config, validate_vector_stores_config
-from llama_stack.providers.datatypes import Api
+from llama_stack.core.storage.datatypes import ServerStoresConfig, StorageConfig
+from llama_stack_api import Api, ListModelsResponse, ListShieldsResponse, Model, ModelType, Shield
class TestVectorStoresValidation:
@@ -23,7 +22,15 @@ class TestVectorStoresValidation:
run_config = StackRunConfig(
image_name="test",
providers={},
- storage=StorageConfig(backends={}, stores={}),
+ storage=StorageConfig(
+ backends={},
+ stores=ServerStoresConfig(
+ metadata=None,
+ inference=None,
+ conversations=None,
+ prompts=None,
+ ),
+ ),
vector_stores=VectorStoresConfig(
default_provider_id="faiss",
default_embedding_model=QualifiedModel(
@@ -43,7 +50,15 @@ class TestVectorStoresValidation:
run_config = StackRunConfig(
image_name="test",
providers={},
- storage=StorageConfig(backends={}, stores={}),
+ storage=StorageConfig(
+ backends={},
+ stores=ServerStoresConfig(
+ metadata=None,
+ inference=None,
+ conversations=None,
+ prompts=None,
+ ),
+ ),
vector_stores=VectorStoresConfig(
default_provider_id="faiss",
default_embedding_model=QualifiedModel(
diff --git a/tests/unit/distribution/routers/test_routing_tables.py b/tests/unit/distribution/routers/test_routing_tables.py
index 8c1838ba3..292ee8384 100644
--- a/tests/unit/distribution/routers/test_routing_tables.py
+++ b/tests/unit/distribution/routers/test_routing_tables.py
@@ -10,14 +10,6 @@ from unittest.mock import AsyncMock
import pytest
-from llama_stack.apis.common.content_types import URL
-from llama_stack.apis.common.errors import ModelNotFoundError
-from llama_stack.apis.common.type_system import NumberType
-from llama_stack.apis.datasets.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack.apis.datatypes import Api
-from llama_stack.apis.models import Model, ModelType
-from llama_stack.apis.shields.shields import Shield
-from llama_stack.apis.tools import ListToolDefsResponse, ToolDef, ToolGroup
from llama_stack.core.datatypes import RegistryEntrySource
from llama_stack.core.routing_tables.benchmarks import BenchmarksRoutingTable
from llama_stack.core.routing_tables.datasets import DatasetsRoutingTable
@@ -25,6 +17,21 @@ from llama_stack.core.routing_tables.models import ModelsRoutingTable
from llama_stack.core.routing_tables.scoring_functions import ScoringFunctionsRoutingTable
from llama_stack.core.routing_tables.shields import ShieldsRoutingTable
from llama_stack.core.routing_tables.toolgroups import ToolGroupsRoutingTable
+from llama_stack_api import (
+ URL,
+ Api,
+ Dataset,
+ DatasetPurpose,
+ ListToolDefsResponse,
+ Model,
+ ModelNotFoundError,
+ ModelType,
+ NumberType,
+ Shield,
+ ToolDef,
+ ToolGroup,
+ URIDataSource,
+)
class Impl:
@@ -130,7 +137,7 @@ class ToolGroupsImpl(Impl):
async def unregister_toolgroup(self, toolgroup_id: str):
return toolgroup_id
- async def list_runtime_tools(self, toolgroup_id, mcp_endpoint):
+ async def list_runtime_tools(self, toolgroup_id, mcp_endpoint, authorization=None):
return ListToolDefsResponse(
data=[
ToolDef(
diff --git a/tests/unit/distribution/test_api_recordings.py b/tests/unit/distribution/test_api_recordings.py
index 2b7ce5c4e..889f063e6 100644
--- a/tests/unit/distribution/test_api_recordings.py
+++ b/tests/unit/distribution/test_api_recordings.py
@@ -11,8 +11,15 @@ from unittest.mock import patch
import pytest
from openai import AsyncOpenAI
+from llama_stack.testing.api_recorder import (
+ APIRecordingMode,
+ ResponseStorage,
+ api_recording,
+ normalize_inference_request,
+)
+
# Import the real Pydantic response types instead of using Mocks
-from llama_stack.apis.inference import (
+from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,
@@ -20,12 +27,6 @@ from llama_stack.apis.inference import (
OpenAIEmbeddingsResponse,
OpenAIEmbeddingUsage,
)
-from llama_stack.testing.api_recorder import (
- APIRecordingMode,
- ResponseStorage,
- api_recording,
- normalize_inference_request,
-)
@pytest.fixture
diff --git a/tests/unit/distribution/test_distribution.py b/tests/unit/distribution/test_distribution.py
index 11f55cfdb..b8d6ba55d 100644
--- a/tests/unit/distribution/test_distribution.py
+++ b/tests/unit/distribution/test_distribution.py
@@ -22,7 +22,7 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.datatypes import ProviderSpec
+from llama_stack_api import ProviderSpec
class SampleConfig(BaseModel):
@@ -312,7 +312,7 @@ pip_packages:
"""Test loading an external provider from a module (success path)."""
from types import SimpleNamespace
- from llama_stack.providers.datatypes import Api, ProviderSpec
+ from llama_stack_api import Api, ProviderSpec
# Simulate a provider module with get_provider_spec
fake_spec = ProviderSpec(
@@ -396,7 +396,7 @@ pip_packages:
def test_external_provider_from_module_building(self, mock_providers):
"""Test loading an external provider from a module during build (building=True, partial spec)."""
from llama_stack.core.datatypes import BuildConfig, BuildProvider, DistributionSpec
- from llama_stack.providers.datatypes import Api
+ from llama_stack_api import Api
# No importlib patch needed, should not import module when type of `config` is BuildConfig or DistributionSpec
build_config = BuildConfig(
@@ -457,7 +457,7 @@ class TestGetExternalProvidersFromModule:
from types import SimpleNamespace
from llama_stack.core.distribution import get_external_providers_from_module
- from llama_stack.providers.datatypes import ProviderSpec
+ from llama_stack_api import ProviderSpec
fake_spec = ProviderSpec(
api=Api.inference,
@@ -594,7 +594,7 @@ class TestGetExternalProvidersFromModule:
from types import SimpleNamespace
from llama_stack.core.distribution import get_external_providers_from_module
- from llama_stack.providers.datatypes import ProviderSpec
+ from llama_stack_api import ProviderSpec
spec1 = ProviderSpec(
api=Api.inference,
@@ -642,7 +642,7 @@ class TestGetExternalProvidersFromModule:
from types import SimpleNamespace
from llama_stack.core.distribution import get_external_providers_from_module
- from llama_stack.providers.datatypes import ProviderSpec
+ from llama_stack_api import ProviderSpec
spec1 = ProviderSpec(
api=Api.inference,
@@ -690,7 +690,7 @@ class TestGetExternalProvidersFromModule:
from types import SimpleNamespace
from llama_stack.core.distribution import get_external_providers_from_module
- from llama_stack.providers.datatypes import ProviderSpec
+ from llama_stack_api import ProviderSpec
# Module returns both inline and remote variants
spec1 = ProviderSpec(
@@ -829,7 +829,7 @@ class TestGetExternalProvidersFromModule:
from types import SimpleNamespace
from llama_stack.core.distribution import get_external_providers_from_module
- from llama_stack.providers.datatypes import ProviderSpec
+ from llama_stack_api import ProviderSpec
inference_spec = ProviderSpec(
api=Api.inference,
diff --git a/tests/unit/files/test_files.py b/tests/unit/files/test_files.py
index 426e2cf64..197038349 100644
--- a/tests/unit/files/test_files.py
+++ b/tests/unit/files/test_files.py
@@ -7,16 +7,14 @@
import pytest
-from llama_stack.apis.common.errors import ResourceNotFoundError
-from llama_stack.apis.common.responses import Order
-from llama_stack.apis.files import OpenAIFilePurpose
from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.inline.files.localfs import (
LocalfsFilesImpl,
LocalfsFilesImplConfig,
)
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import OpenAIFilePurpose, Order, ResourceNotFoundError
class MockUploadFile:
diff --git a/tests/unit/fixtures.py b/tests/unit/fixtures.py
index 443a1d371..9e049f8da 100644
--- a/tests/unit/fixtures.py
+++ b/tests/unit/fixtures.py
@@ -6,9 +6,9 @@
import pytest
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.sqlite import SqliteKVStoreImpl
from llama_stack.core.store.registry import CachedDiskDistributionRegistry, DiskDistributionRegistry
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.sqlite import SqliteKVStoreImpl
@pytest.fixture(scope="function")
diff --git a/tests/unit/models/test_prompt_adapter.py b/tests/unit/models/test_prompt_adapter.py
deleted file mode 100644
index d31426135..000000000
--- a/tests/unit/models/test_prompt_adapter.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-
-from llama_stack.apis.inference import (
- ChatCompletionRequest,
- CompletionMessage,
- StopReason,
- SystemMessage,
- SystemMessageBehavior,
- ToolCall,
- ToolConfig,
- UserMessage,
-)
-from llama_stack.models.llama.datatypes import (
- BuiltinTool,
- ToolDefinition,
- ToolPromptFormat,
-)
-from llama_stack.providers.utils.inference.prompt_adapter import (
- chat_completion_request_to_messages,
- chat_completion_request_to_prompt,
- interleaved_content_as_str,
-)
-
-MODEL = "Llama3.1-8B-Instruct"
-MODEL3_2 = "Llama3.2-3B-Instruct"
-
-
-async def test_system_default():
- content = "Hello !"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- UserMessage(content=content),
- ],
- )
- messages = chat_completion_request_to_messages(request, MODEL)
- assert len(messages) == 2
- assert messages[-1].content == content
- assert "Cutting Knowledge Date: December 2023" in interleaved_content_as_str(messages[0].content)
-
-
-async def test_system_builtin_only():
- content = "Hello !"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(tool_name=BuiltinTool.code_interpreter),
- ToolDefinition(tool_name=BuiltinTool.brave_search),
- ],
- )
- messages = chat_completion_request_to_messages(request, MODEL)
- assert len(messages) == 2
- assert messages[-1].content == content
- assert "Cutting Knowledge Date: December 2023" in interleaved_content_as_str(messages[0].content)
- assert "Tools: brave_search" in interleaved_content_as_str(messages[0].content)
-
-
-async def test_system_custom_only():
- content = "Hello !"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(
- tool_name="custom1",
- description="custom1 tool",
- input_schema={
- "type": "object",
- "properties": {
- "param1": {
- "type": "str",
- "description": "param1 description",
- },
- },
- "required": ["param1"],
- },
- )
- ],
- tool_config=ToolConfig(tool_prompt_format=ToolPromptFormat.json),
- )
- messages = chat_completion_request_to_messages(request, MODEL)
- assert len(messages) == 3
- assert "Environment: ipython" in interleaved_content_as_str(messages[0].content)
-
- assert "Return function calls in JSON format" in interleaved_content_as_str(messages[1].content)
- assert messages[-1].content == content
-
-
-async def test_system_custom_and_builtin():
- content = "Hello !"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(tool_name=BuiltinTool.code_interpreter),
- ToolDefinition(tool_name=BuiltinTool.brave_search),
- ToolDefinition(
- tool_name="custom1",
- description="custom1 tool",
- input_schema={
- "type": "object",
- "properties": {
- "param1": {
- "type": "str",
- "description": "param1 description",
- },
- },
- "required": ["param1"],
- },
- ),
- ],
- )
- messages = chat_completion_request_to_messages(request, MODEL)
- assert len(messages) == 3
-
- assert "Environment: ipython" in interleaved_content_as_str(messages[0].content)
- assert "Tools: brave_search" in interleaved_content_as_str(messages[0].content)
-
- assert "Return function calls in JSON format" in interleaved_content_as_str(messages[1].content)
- assert messages[-1].content == content
-
-
-async def test_completion_message_encoding():
- request = ChatCompletionRequest(
- model=MODEL3_2,
- messages=[
- UserMessage(content="hello"),
- CompletionMessage(
- content="",
- stop_reason=StopReason.end_of_turn,
- tool_calls=[
- ToolCall(
- tool_name="custom1",
- arguments='{"param1": "value1"}', # arguments must be a JSON string
- call_id="123",
- )
- ],
- ),
- ],
- tools=[
- ToolDefinition(
- tool_name="custom1",
- description="custom1 tool",
- input_schema={
- "type": "object",
- "properties": {
- "param1": {
- "type": "str",
- "description": "param1 description",
- },
- },
- "required": ["param1"],
- },
- ),
- ],
- tool_config=ToolConfig(tool_prompt_format=ToolPromptFormat.python_list),
- )
- prompt = await chat_completion_request_to_prompt(request, request.model)
- assert '[custom1(param1="value1")]' in prompt
-
- request.model = MODEL
- request.tool_config = ToolConfig(tool_prompt_format=ToolPromptFormat.json)
- prompt = await chat_completion_request_to_prompt(request, request.model)
- assert '{"type": "function", "name": "custom1", "parameters": {"param1": "value1"}}' in prompt
-
-
-async def test_user_provided_system_message():
- content = "Hello !"
- system_prompt = "You are a pirate"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- SystemMessage(content=system_prompt),
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(tool_name=BuiltinTool.code_interpreter),
- ],
- )
- messages = chat_completion_request_to_messages(request, MODEL)
- assert len(messages) == 2
- assert interleaved_content_as_str(messages[0].content).endswith(system_prompt)
-
- assert messages[-1].content == content
-
-
-async def test_replace_system_message_behavior_builtin_tools():
- content = "Hello !"
- system_prompt = "You are a pirate"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- SystemMessage(content=system_prompt),
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(tool_name=BuiltinTool.code_interpreter),
- ],
- tool_config=ToolConfig(
- tool_choice="auto",
- tool_prompt_format=ToolPromptFormat.python_list,
- system_message_behavior=SystemMessageBehavior.replace,
- ),
- )
- messages = chat_completion_request_to_messages(request, MODEL3_2)
- assert len(messages) == 2
- assert interleaved_content_as_str(messages[0].content).endswith(system_prompt)
- assert "Environment: ipython" in interleaved_content_as_str(messages[0].content)
- assert messages[-1].content == content
-
-
-async def test_replace_system_message_behavior_custom_tools():
- content = "Hello !"
- system_prompt = "You are a pirate"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- SystemMessage(content=system_prompt),
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(tool_name=BuiltinTool.code_interpreter),
- ToolDefinition(
- tool_name="custom1",
- description="custom1 tool",
- input_schema={
- "type": "object",
- "properties": {
- "param1": {
- "type": "str",
- "description": "param1 description",
- },
- },
- "required": ["param1"],
- },
- ),
- ],
- tool_config=ToolConfig(
- tool_choice="auto",
- tool_prompt_format=ToolPromptFormat.python_list,
- system_message_behavior=SystemMessageBehavior.replace,
- ),
- )
- messages = chat_completion_request_to_messages(request, MODEL3_2)
-
- assert len(messages) == 2
- assert interleaved_content_as_str(messages[0].content).endswith(system_prompt)
- assert "Environment: ipython" in interleaved_content_as_str(messages[0].content)
- assert messages[-1].content == content
-
-
-async def test_replace_system_message_behavior_custom_tools_with_template():
- content = "Hello !"
- system_prompt = "You are a pirate {{ function_description }}"
- request = ChatCompletionRequest(
- model=MODEL,
- messages=[
- SystemMessage(content=system_prompt),
- UserMessage(content=content),
- ],
- tools=[
- ToolDefinition(tool_name=BuiltinTool.code_interpreter),
- ToolDefinition(
- tool_name="custom1",
- description="custom1 tool",
- input_schema={
- "type": "object",
- "properties": {
- "param1": {
- "type": "str",
- "description": "param1 description",
- },
- },
- "required": ["param1"],
- },
- ),
- ],
- tool_config=ToolConfig(
- tool_choice="auto",
- tool_prompt_format=ToolPromptFormat.python_list,
- system_message_behavior=SystemMessageBehavior.replace,
- ),
- )
- messages = chat_completion_request_to_messages(request, MODEL3_2)
-
- assert len(messages) == 2
- assert "Environment: ipython" in interleaved_content_as_str(messages[0].content)
- assert "You are a pirate" in interleaved_content_as_str(messages[0].content)
- # function description is present in the system prompt
- assert '"name": "custom1"' in interleaved_content_as_str(messages[0].content)
- assert messages[-1].content == content
diff --git a/tests/unit/prompts/prompts/conftest.py b/tests/unit/prompts/prompts/conftest.py
index c876f2041..8bfc1f03c 100644
--- a/tests/unit/prompts/prompts/conftest.py
+++ b/tests/unit/prompts/prompts/conftest.py
@@ -18,7 +18,7 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
@pytest.fixture
diff --git a/tests/unit/providers/agents/meta_reference/fixtures/__init__.py b/tests/unit/providers/agents/meta_reference/fixtures/__init__.py
new file mode 100644
index 000000000..130c46f6d
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/fixtures/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import os
+
+import yaml
+
+from llama_stack_api.inference import (
+ OpenAIChatCompletion,
+)
+
+FIXTURES_DIR = os.path.dirname(os.path.abspath(__file__))
+
+
+def load_chat_completion_fixture(filename: str) -> OpenAIChatCompletion:
+ fixture_path = os.path.join(FIXTURES_DIR, filename)
+
+ with open(fixture_path) as f:
+ data = yaml.safe_load(f)
+ return OpenAIChatCompletion(**data)
diff --git a/tests/unit/providers/agents/meta_reference/fixtures/simple_chat_completion.yaml b/tests/unit/providers/agents/meta_reference/fixtures/simple_chat_completion.yaml
new file mode 100644
index 000000000..4959349a0
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/fixtures/simple_chat_completion.yaml
@@ -0,0 +1,9 @@
+id: chat-completion-123
+choices:
+ - message:
+ content: "Dublin"
+ role: assistant
+ finish_reason: stop
+ index: 0
+created: 1234567890
+model: meta-llama/Llama-3.1-8B-Instruct
diff --git a/tests/unit/providers/agents/meta_reference/fixtures/tool_call_completion.yaml b/tests/unit/providers/agents/meta_reference/fixtures/tool_call_completion.yaml
new file mode 100644
index 000000000..f6532e3a9
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/fixtures/tool_call_completion.yaml
@@ -0,0 +1,14 @@
+id: chat-completion-123
+choices:
+ - message:
+ tool_calls:
+ - id: tool_call_123
+ type: function
+ function:
+ name: web_search
+ arguments: '{"query":"What is the capital of Ireland?"}'
+ role: assistant
+ finish_reason: stop
+ index: 0
+created: 1234567890
+model: meta-llama/Llama-3.1-8B-Instruct
diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses.py b/tests/unit/providers/agents/meta_reference/test_openai_responses.py
new file mode 100644
index 000000000..97bccbfe4
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_openai_responses.py
@@ -0,0 +1,1757 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from unittest.mock import AsyncMock, patch
+
+import pytest
+from openai.types.chat.chat_completion_chunk import (
+ ChatCompletionChunk,
+ Choice,
+ ChoiceDelta,
+ ChoiceDeltaToolCall,
+ ChoiceDeltaToolCallFunction,
+)
+
+from llama_stack.core.access_control.access_control import default_policy
+from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+)
+from llama_stack.providers.utils.responses.responses_store import (
+ ResponsesStore,
+ _OpenAIResponseObjectWithInputAndMessages,
+)
+from llama_stack_api import (
+ OpenAIChatCompletionContentPartImageParam,
+ OpenAIFile,
+ OpenAIFileObject,
+ OpenAISystemMessageParam,
+ Prompt,
+)
+from llama_stack_api.agents import Order
+from llama_stack_api.inference import (
+ OpenAIAssistantMessageParam,
+ OpenAIChatCompletionContentPartTextParam,
+ OpenAIChatCompletionRequestWithExtraBody,
+ OpenAIDeveloperMessageParam,
+ OpenAIJSONSchema,
+ OpenAIResponseFormatJSONObject,
+ OpenAIResponseFormatJSONSchema,
+ OpenAIUserMessageParam,
+)
+from llama_stack_api.openai_responses import (
+ ListOpenAIResponseInputItem,
+ OpenAIResponseInputMessageContentFile,
+ OpenAIResponseInputMessageContentImage,
+ OpenAIResponseInputMessageContentText,
+ OpenAIResponseInputToolFunction,
+ OpenAIResponseInputToolMCP,
+ OpenAIResponseInputToolWebSearch,
+ OpenAIResponseMessage,
+ OpenAIResponseOutputMessageContentOutputText,
+ OpenAIResponseOutputMessageFunctionToolCall,
+ OpenAIResponseOutputMessageMCPCall,
+ OpenAIResponseOutputMessageWebSearchToolCall,
+ OpenAIResponsePrompt,
+ OpenAIResponseText,
+ OpenAIResponseTextFormat,
+ WebSearchToolTypes,
+)
+from llama_stack_api.tools import ListToolDefsResponse, ToolDef, ToolGroups, ToolInvocationResult, ToolRuntime
+from tests.unit.providers.agents.meta_reference.fixtures import load_chat_completion_fixture
+
+
+@pytest.fixture
+def mock_inference_api():
+ inference_api = AsyncMock()
+ return inference_api
+
+
+@pytest.fixture
+def mock_tool_groups_api():
+ tool_groups_api = AsyncMock(spec=ToolGroups)
+ return tool_groups_api
+
+
+@pytest.fixture
+def mock_tool_runtime_api():
+ tool_runtime_api = AsyncMock(spec=ToolRuntime)
+ return tool_runtime_api
+
+
+@pytest.fixture
+def mock_responses_store():
+ responses_store = AsyncMock(spec=ResponsesStore)
+ return responses_store
+
+
+@pytest.fixture
+def mock_vector_io_api():
+ vector_io_api = AsyncMock()
+ return vector_io_api
+
+
+@pytest.fixture
+def mock_conversations_api():
+ """Mock conversations API for testing."""
+ mock_api = AsyncMock()
+ return mock_api
+
+
+@pytest.fixture
+def mock_safety_api():
+ safety_api = AsyncMock()
+ return safety_api
+
+
+@pytest.fixture
+def mock_prompts_api():
+ prompts_api = AsyncMock()
+ return prompts_api
+
+
+@pytest.fixture
+def mock_files_api():
+ """Mock files API for testing."""
+ files_api = AsyncMock()
+ return files_api
+
+
+@pytest.fixture
+def openai_responses_impl(
+ mock_inference_api,
+ mock_tool_groups_api,
+ mock_tool_runtime_api,
+ mock_responses_store,
+ mock_vector_io_api,
+ mock_safety_api,
+ mock_conversations_api,
+ mock_prompts_api,
+ mock_files_api,
+):
+ return OpenAIResponsesImpl(
+ inference_api=mock_inference_api,
+ tool_groups_api=mock_tool_groups_api,
+ tool_runtime_api=mock_tool_runtime_api,
+ responses_store=mock_responses_store,
+ vector_io_api=mock_vector_io_api,
+ safety_api=mock_safety_api,
+ conversations_api=mock_conversations_api,
+ prompts_api=mock_prompts_api,
+ files_api=mock_files_api,
+ )
+
+
+async def fake_stream(fixture: str = "simple_chat_completion.yaml"):
+ value = load_chat_completion_fixture(fixture)
+ yield ChatCompletionChunk(
+ id=value.id,
+ choices=[
+ Choice(
+ index=0,
+ delta=ChoiceDelta(
+ content=c.message.content,
+ role=c.message.role,
+ tool_calls=[
+ ChoiceDeltaToolCall(
+ index=0,
+ id=t.id,
+ function=ChoiceDeltaToolCallFunction(
+ name=t.function.name,
+ arguments=t.function.arguments,
+ ),
+ )
+ for t in (c.message.tool_calls or [])
+ ],
+ ),
+ )
+ for c in value.choices
+ ],
+ created=1,
+ model=value.model,
+ object="chat.completion.chunk",
+ )
+
+
+async def test_create_openai_response_with_string_input(openai_responses_impl, mock_inference_api):
+ """Test creating an OpenAI response with a simple string input."""
+ # Setup
+ input_text = "What is the capital of Ireland?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+
+ # Load the chat completion fixture
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ # Execute
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ temperature=0.1,
+ stream=True, # Enable streaming to test content part events
+ )
+
+ # For streaming response, collect all chunks
+ chunks = [chunk async for chunk in result]
+
+ mock_inference_api.openai_chat_completion.assert_called_once_with(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model=model,
+ messages=[OpenAIUserMessageParam(role="user", content="What is the capital of Ireland?", name=None)],
+ response_format=None,
+ tools=None,
+ stream=True,
+ temperature=0.1,
+ stream_options={
+ "include_usage": True,
+ },
+ )
+ )
+
+ # Should have content part events for text streaming
+ # Expected: response.created, response.in_progress, content_part.added, output_text.delta, content_part.done, response.completed
+ assert len(chunks) >= 5
+ assert chunks[0].type == "response.created"
+ assert any(chunk.type == "response.in_progress" for chunk in chunks)
+
+ # Check for content part events
+ content_part_added_events = [c for c in chunks if c.type == "response.content_part.added"]
+ content_part_done_events = [c for c in chunks if c.type == "response.content_part.done"]
+ text_delta_events = [c for c in chunks if c.type == "response.output_text.delta"]
+
+ assert len(content_part_added_events) >= 1, "Should have content_part.added event for text"
+ assert len(content_part_done_events) >= 1, "Should have content_part.done event for text"
+ assert len(text_delta_events) >= 1, "Should have text delta events"
+
+ added_event = content_part_added_events[0]
+ done_event = content_part_done_events[0]
+ assert added_event.content_index == 0
+ assert done_event.content_index == 0
+ assert added_event.output_index == done_event.output_index == 0
+ assert added_event.item_id == done_event.item_id
+ assert added_event.response_id == done_event.response_id
+
+ # Verify final event is completion
+ assert chunks[-1].type == "response.completed"
+
+ # When streaming, the final response is in the last chunk
+ final_response = chunks[-1].response
+ assert final_response.model == model
+ assert len(final_response.output) == 1
+ assert isinstance(final_response.output[0], OpenAIResponseMessage)
+ assert final_response.output[0].id == added_event.item_id
+ assert final_response.id == added_event.response_id
+
+ openai_responses_impl.responses_store.store_response_object.assert_called_once()
+ assert final_response.output[0].content[0].text == "Dublin"
+
+
+async def test_create_openai_response_with_string_input_with_tools(openai_responses_impl, mock_inference_api):
+ """Test creating an OpenAI response with a simple string input and tools."""
+ # Setup
+ input_text = "What is the capital of Ireland?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+
+ openai_responses_impl.tool_groups_api.get_tool.return_value = ToolDef(
+ name="web_search",
+ toolgroup_id="web_search",
+ description="Search the web for information",
+ input_schema={
+ "type": "object",
+ "properties": {"query": {"type": "string", "description": "The query to search for"}},
+ "required": ["query"],
+ },
+ )
+
+ openai_responses_impl.tool_runtime_api.invoke_tool.return_value = ToolInvocationResult(
+ status="completed",
+ content="Dublin",
+ )
+
+ # Execute
+ for tool_name in WebSearchToolTypes:
+ # Reset mock states as we loop through each tool type
+ mock_inference_api.openai_chat_completion.side_effect = [
+ fake_stream("tool_call_completion.yaml"),
+ fake_stream(),
+ ]
+ openai_responses_impl.tool_groups_api.get_tool.reset_mock()
+ openai_responses_impl.tool_runtime_api.invoke_tool.reset_mock()
+ openai_responses_impl.responses_store.store_response_object.reset_mock()
+
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ temperature=0.1,
+ tools=[
+ OpenAIResponseInputToolWebSearch(
+ name=tool_name,
+ )
+ ],
+ )
+
+ # Verify
+ first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+ first_params = first_call.args[0]
+ assert first_params.messages[0].content == "What is the capital of Ireland?"
+ assert first_params.tools is not None
+ assert first_params.temperature == 0.1
+
+ second_call = mock_inference_api.openai_chat_completion.call_args_list[1]
+ second_params = second_call.args[0]
+ assert second_params.messages[-1].content == "Dublin"
+ assert second_params.temperature == 0.1
+
+ openai_responses_impl.tool_groups_api.get_tool.assert_called_once_with("web_search")
+ openai_responses_impl.tool_runtime_api.invoke_tool.assert_called_once_with(
+ tool_name="web_search",
+ kwargs={"query": "What is the capital of Ireland?"},
+ )
+
+ openai_responses_impl.responses_store.store_response_object.assert_called_once()
+
+ # Check that we got the content from our mocked tool execution result
+ assert len(result.output) >= 1
+ assert isinstance(result.output[1], OpenAIResponseMessage)
+ assert result.output[1].content[0].text == "Dublin"
+ assert result.output[1].content[0].annotations == []
+
+
+async def test_create_openai_response_with_tool_call_type_none(openai_responses_impl, mock_inference_api):
+ """Test creating an OpenAI response with a tool call response that has a type of None."""
+ # Setup
+ input_text = "How hot it is in San Francisco today?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+
+ async def fake_stream_toolcall():
+ yield ChatCompletionChunk(
+ id="123",
+ choices=[
+ Choice(
+ index=0,
+ delta=ChoiceDelta(
+ tool_calls=[
+ ChoiceDeltaToolCall(
+ index=0,
+ id="tc_123",
+ function=ChoiceDeltaToolCallFunction(name="get_weather", arguments="{}"),
+ type=None,
+ )
+ ]
+ ),
+ ),
+ ],
+ created=1,
+ model=model,
+ object="chat.completion.chunk",
+ )
+
+ mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+
+ # Execute
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ stream=True,
+ temperature=0.1,
+ tools=[
+ OpenAIResponseInputToolFunction(
+ name="get_weather",
+ description="Get current temperature for a given location.",
+ parameters={
+ "location": "string",
+ },
+ )
+ ],
+ )
+
+ # Check that we got the content from our mocked tool execution result
+ chunks = [chunk async for chunk in result]
+
+ # Verify event types
+ # Should have: response.created, response.in_progress, output_item.added,
+ # function_call_arguments.delta, function_call_arguments.done, output_item.done, response.completed
+ assert len(chunks) == 7
+
+ event_types = [chunk.type for chunk in chunks]
+ assert event_types == [
+ "response.created",
+ "response.in_progress",
+ "response.output_item.added",
+ "response.function_call_arguments.delta",
+ "response.function_call_arguments.done",
+ "response.output_item.done",
+ "response.completed",
+ ]
+
+ # Verify inference API was called correctly (after iterating over result)
+ first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+ first_params = first_call.args[0]
+ assert first_params.messages[0].content == input_text
+ assert first_params.tools is not None
+ assert first_params.temperature == 0.1
+
+ # Check response.created event (should have empty output)
+ assert len(chunks[0].response.output) == 0
+
+ # Check response.completed event (should have the tool call)
+ completed_chunk = chunks[-1]
+ assert completed_chunk.type == "response.completed"
+ assert len(completed_chunk.response.output) == 1
+ assert completed_chunk.response.output[0].type == "function_call"
+ assert completed_chunk.response.output[0].name == "get_weather"
+
+
+async def test_create_openai_response_with_tool_call_function_arguments_none(openai_responses_impl, mock_inference_api):
+ """Test creating an OpenAI response with tool calls that omit arguments."""
+
+ input_text = "What is the time right now?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+
+ async def fake_stream_toolcall():
+ yield ChatCompletionChunk(
+ id="123",
+ choices=[
+ Choice(
+ index=0,
+ delta=ChoiceDelta(
+ tool_calls=[
+ ChoiceDeltaToolCall(
+ index=0,
+ id="tc_123",
+ function=ChoiceDeltaToolCallFunction(name="get_current_time", arguments=None),
+ type=None,
+ )
+ ]
+ ),
+ ),
+ ],
+ created=1,
+ model=model,
+ object="chat.completion.chunk",
+ )
+
+ def assert_common_expectations(chunks) -> None:
+ first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+ first_params = first_call.args[0]
+ assert first_params.messages[0].content == input_text
+ assert first_params.tools is not None
+ assert first_params.temperature == 0.1
+ assert len(chunks[0].response.output) == 0
+ completed_chunk = chunks[-1]
+ assert completed_chunk.type == "response.completed"
+ assert len(completed_chunk.response.output) == 1
+ assert completed_chunk.response.output[0].type == "function_call"
+ assert completed_chunk.response.output[0].name == "get_current_time"
+ assert completed_chunk.response.output[0].arguments == "{}"
+
+ # Function does not accept arguments
+ mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ stream=True,
+ temperature=0.1,
+ tools=[
+ OpenAIResponseInputToolFunction(
+ name="get_current_time", description="Get current time for system's timezone", parameters={}
+ )
+ ],
+ )
+ chunks = [chunk async for chunk in result]
+ assert [chunk.type for chunk in chunks] == [
+ "response.created",
+ "response.in_progress",
+ "response.output_item.added",
+ "response.function_call_arguments.done",
+ "response.output_item.done",
+ "response.completed",
+ ]
+ assert_common_expectations(chunks)
+
+ # Function accepts optional arguments
+ mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ stream=True,
+ temperature=0.1,
+ tools=[
+ OpenAIResponseInputToolFunction(
+ name="get_current_time",
+ description="Get current time for system's timezone",
+ parameters={"timezone": "string"},
+ )
+ ],
+ )
+ chunks = [chunk async for chunk in result]
+ assert [chunk.type for chunk in chunks] == [
+ "response.created",
+ "response.in_progress",
+ "response.output_item.added",
+ "response.function_call_arguments.done",
+ "response.output_item.done",
+ "response.completed",
+ ]
+ assert_common_expectations(chunks)
+
+ # Function accepts optional arguments with additional optional fields
+ mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+ result = await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ stream=True,
+ temperature=0.1,
+ tools=[
+ OpenAIResponseInputToolFunction(
+ name="get_current_time",
+ description="Get current time for system's timezone",
+ parameters={"timezone": "string", "location": "string"},
+ )
+ ],
+ )
+ chunks = [chunk async for chunk in result]
+ assert [chunk.type for chunk in chunks] == [
+ "response.created",
+ "response.in_progress",
+ "response.output_item.added",
+ "response.function_call_arguments.done",
+ "response.output_item.done",
+ "response.completed",
+ ]
+ assert_common_expectations(chunks)
+ mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall()
+
+
+async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api, mock_files_api):
+ """Test creating an OpenAI response with multiple messages."""
+ # Setup
+ input_messages = [
+ OpenAIResponseMessage(role="developer", content="You are a helpful assistant", name=None),
+ OpenAIResponseMessage(role="user", content="Name some towns in Ireland", name=None),
+ OpenAIResponseMessage(
+ role="assistant",
+ content=[
+ OpenAIResponseInputMessageContentText(text="Galway, Longford, Sligo"),
+ OpenAIResponseInputMessageContentText(text="Dublin"),
+ ],
+ name=None,
+ ),
+ OpenAIResponseMessage(role="user", content="Which is the largest town in Ireland?", name=None),
+ ]
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ # Execute
+ await openai_responses_impl.create_openai_response(
+ input=input_messages,
+ model=model,
+ temperature=0.1,
+ )
+
+    # Verify the correct messages were sent to the inference API, i.e.
+    # all of the response messages were converted to chat completion message objects
+ call_args = mock_inference_api.openai_chat_completion.call_args_list[0]
+ params = call_args.args[0]
+ inference_messages = params.messages
+ for i, m in enumerate(input_messages):
+ if isinstance(m.content, str):
+ assert inference_messages[i].content == m.content
+ else:
+ assert inference_messages[i].content[0].text == m.content[0].text
+ assert isinstance(inference_messages[i].content[0], OpenAIChatCompletionContentPartTextParam)
+ assert inference_messages[i].role == m.role
+ if m.role == "user":
+ assert isinstance(inference_messages[i], OpenAIUserMessageParam)
+ elif m.role == "assistant":
+ assert isinstance(inference_messages[i], OpenAIAssistantMessageParam)
+ else:
+ assert isinstance(inference_messages[i], OpenAIDeveloperMessageParam)
+
+
+async def test_prepend_previous_response_basic(openai_responses_impl, mock_responses_store):
+ """Test prepending a basic previous response to a new response."""
+
+ input_item_message = OpenAIResponseMessage(
+ id="123",
+ content=[OpenAIResponseInputMessageContentText(text="fake_previous_input")],
+ role="user",
+ )
+ response_output_message = OpenAIResponseMessage(
+ id="123",
+ content=[OpenAIResponseOutputMessageContentOutputText(text="fake_response")],
+ status="completed",
+ role="assistant",
+ )
+ previous_response = _OpenAIResponseObjectWithInputAndMessages(
+ created_at=1,
+ id="resp_123",
+ model="fake_model",
+ output=[response_output_message],
+ status="completed",
+ text=OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")),
+ input=[input_item_message],
+ messages=[OpenAIUserMessageParam(content="fake_previous_input")],
+ )
+ mock_responses_store.get_response_object.return_value = previous_response
+
+ input = await openai_responses_impl._prepend_previous_response("fake_input", previous_response)
+
+ assert len(input) == 3
+ # Check for previous input
+ assert isinstance(input[0], OpenAIResponseMessage)
+ assert input[0].content[0].text == "fake_previous_input"
+ # Check for previous output
+ assert isinstance(input[1], OpenAIResponseMessage)
+ assert input[1].content[0].text == "fake_response"
+ # Check for new input
+ assert isinstance(input[2], OpenAIResponseMessage)
+ assert input[2].content == "fake_input"
+
+
+async def test_prepend_previous_response_web_search(openai_responses_impl, mock_responses_store):
+ """Test prepending a web search previous response to a new response."""
+ input_item_message = OpenAIResponseMessage(
+ id="123",
+ content=[OpenAIResponseInputMessageContentText(text="fake_previous_input")],
+ role="user",
+ )
+ output_web_search = OpenAIResponseOutputMessageWebSearchToolCall(
+ id="ws_123",
+ status="completed",
+ )
+ output_message = OpenAIResponseMessage(
+ id="123",
+ content=[OpenAIResponseOutputMessageContentOutputText(text="fake_web_search_response")],
+ status="completed",
+ role="assistant",
+ )
+ response = _OpenAIResponseObjectWithInputAndMessages(
+ created_at=1,
+ id="resp_123",
+ model="fake_model",
+ output=[output_web_search, output_message],
+ status="completed",
+ text=OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")),
+ input=[input_item_message],
+ messages=[OpenAIUserMessageParam(content="test input")],
+ )
+ mock_responses_store.get_response_object.return_value = response
+
+ input_messages = [OpenAIResponseMessage(content="fake_input", role="user")]
+ input = await openai_responses_impl._prepend_previous_response(input_messages, response)
+
+ assert len(input) == 4
+ # Check for previous input
+ assert isinstance(input[0], OpenAIResponseMessage)
+ assert input[0].content[0].text == "fake_previous_input"
+ # Check for previous output web search tool call
+ assert isinstance(input[1], OpenAIResponseOutputMessageWebSearchToolCall)
+ # Check for previous output web search response
+ assert isinstance(input[2], OpenAIResponseMessage)
+ assert input[2].content[0].text == "fake_web_search_response"
+ # Check for new input
+ assert isinstance(input[3], OpenAIResponseMessage)
+ assert input[3].content == "fake_input"
+
+
+async def test_prepend_previous_response_mcp_tool_call(openai_responses_impl, mock_responses_store):
+ """Test prepending a previous response which included an mcp tool call to a new response."""
+ input_item_message = OpenAIResponseMessage(
+ id="123",
+ content=[OpenAIResponseInputMessageContentText(text="fake_previous_input")],
+ role="user",
+ )
+ output_tool_call = OpenAIResponseOutputMessageMCPCall(
+ id="ws_123",
+ name="fake-tool",
+ arguments="fake-arguments",
+ server_label="fake-label",
+ )
+ output_message = OpenAIResponseMessage(
+ id="123",
+ content=[OpenAIResponseOutputMessageContentOutputText(text="fake_tool_call_response")],
+ status="completed",
+ role="assistant",
+ )
+ response = _OpenAIResponseObjectWithInputAndMessages(
+ created_at=1,
+ id="resp_123",
+ model="fake_model",
+ output=[output_tool_call, output_message],
+ status="completed",
+ text=OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")),
+ input=[input_item_message],
+ messages=[OpenAIUserMessageParam(content="test input")],
+ )
+ mock_responses_store.get_response_object.return_value = response
+
+ input_messages = [OpenAIResponseMessage(content="fake_input", role="user")]
+ input = await openai_responses_impl._prepend_previous_response(input_messages, response)
+
+ assert len(input) == 4
+ # Check for previous input
+ assert isinstance(input[0], OpenAIResponseMessage)
+ assert input[0].content[0].text == "fake_previous_input"
+ # Check for previous output MCP tool call
+ assert isinstance(input[1], OpenAIResponseOutputMessageMCPCall)
+    # Check for previous output MCP tool call response
+ assert isinstance(input[2], OpenAIResponseMessage)
+ assert input[2].content[0].text == "fake_tool_call_response"
+ # Check for new input
+ assert isinstance(input[3], OpenAIResponseMessage)
+ assert input[3].content == "fake_input"
+
+
+async def test_create_openai_response_with_instructions(openai_responses_impl, mock_inference_api):
+ # Setup
+ input_text = "What is the capital of Ireland?"
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+ instructions = "You are a geography expert. Provide concise answers."
+
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ # Execute
+ await openai_responses_impl.create_openai_response(
+ input=input_text,
+ model=model,
+ instructions=instructions,
+ )
+
+ # Verify
+ mock_inference_api.openai_chat_completion.assert_called_once()
+ call_args = mock_inference_api.openai_chat_completion.call_args
+ params = call_args.args[0]
+ sent_messages = params.messages
+
+ # Check that instructions were prepended as a system message
+ assert len(sent_messages) == 2
+ assert sent_messages[0].role == "system"
+ assert sent_messages[0].content == instructions
+ assert sent_messages[1].role == "user"
+ assert sent_messages[1].content == input_text
+
+
+async def test_create_openai_response_with_instructions_and_multiple_messages(
+ openai_responses_impl, mock_inference_api, mock_files_api
+):
+ # Setup
+ input_messages = [
+ OpenAIResponseMessage(role="user", content="Name some towns in Ireland", name=None),
+ OpenAIResponseMessage(
+ role="assistant",
+ content="Galway, Longford, Sligo",
+ name=None,
+ ),
+ OpenAIResponseMessage(role="user", content="Which is the largest?", name=None),
+ ]
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+ instructions = "You are a geography expert. Provide concise answers."
+
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ # Execute
+ await openai_responses_impl.create_openai_response(
+ input=input_messages,
+ model=model,
+ instructions=instructions,
+ )
+
+ # Verify
+ mock_inference_api.openai_chat_completion.assert_called_once()
+ call_args = mock_inference_api.openai_chat_completion.call_args
+ params = call_args.args[0]
+ sent_messages = params.messages
+
+ # Check that instructions were prepended as a system message
+ assert len(sent_messages) == 4 # 1 system + 3 input messages
+ assert sent_messages[0].role == "system"
+ assert sent_messages[0].content == instructions
+
+ # Check the rest of the messages were converted correctly
+ assert sent_messages[1].role == "user"
+ assert sent_messages[1].content == "Name some towns in Ireland"
+ assert sent_messages[2].role == "assistant"
+ assert sent_messages[2].content == "Galway, Longford, Sligo"
+ assert sent_messages[3].role == "user"
+ assert sent_messages[3].content == "Which is the largest?"
+
+
+async def test_create_openai_response_with_instructions_and_previous_response(
+ openai_responses_impl, mock_responses_store, mock_inference_api
+):
+ """Test prepending both instructions and previous response."""
+
+ input_item_message = OpenAIResponseMessage(
+ id="123",
+ content="Name some towns in Ireland",
+ role="user",
+ )
+ response_output_message = OpenAIResponseMessage(
+ id="123",
+ content="Galway, Longford, Sligo",
+ status="completed",
+ role="assistant",
+ )
+ response = _OpenAIResponseObjectWithInputAndMessages(
+ created_at=1,
+ id="resp_123",
+ model="fake_model",
+ output=[response_output_message],
+ status="completed",
+ text=OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")),
+ input=[input_item_message],
+ messages=[
+ OpenAIUserMessageParam(content="Name some towns in Ireland"),
+ OpenAIAssistantMessageParam(content="Galway, Longford, Sligo"),
+ ],
+ )
+ mock_responses_store.get_response_object.return_value = response
+
+ model = "meta-llama/Llama-3.1-8B-Instruct"
+ instructions = "You are a geography expert. Provide concise answers."
+
+ mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+ # Execute
+ await openai_responses_impl.create_openai_response(
+ input="Which is the largest?", model=model, instructions=instructions, previous_response_id="123"
+ )
+
+ # Verify
+ mock_inference_api.openai_chat_completion.assert_called_once()
+ call_args = mock_inference_api.openai_chat_completion.call_args
+ params = call_args.args[0]
+ sent_messages = params.messages
+
+ # Check that instructions were prepended as a system message
+ assert len(sent_messages) == 4, sent_messages
+ assert sent_messages[0].role == "system"
+ assert sent_messages[0].content == instructions
+
+ # Check the rest of the messages were converted correctly
+ assert sent_messages[1].role == "user"
+ assert sent_messages[1].content == "Name some towns in Ireland"
+ assert sent_messages[2].role == "assistant"
+ assert sent_messages[2].content == "Galway, Longford, Sligo"
+ assert sent_messages[3].role == "user"
+ assert sent_messages[3].content == "Which is the largest?"
+
+
+async def test_create_openai_response_with_previous_response_instructions(
+    openai_responses_impl, mock_responses_store, mock_inference_api
+):
+    """Test prepending instructions and previous response with instructions."""
+
+    input_item_message = OpenAIResponseMessage(
+        id="123",
+        content="Name some towns in Ireland",
+        role="user",
+    )
+    response_output_message = OpenAIResponseMessage(
+        id="123",
+        content="Galway, Longford, Sligo",
+        status="completed",
+        role="assistant",
+    )
+    # The stored previous response carries its own instructions; those must NOT
+    # leak into the new request — only the caller-supplied instructions should win.
+    response = _OpenAIResponseObjectWithInputAndMessages(
+        created_at=1,
+        id="resp_123",
+        model="fake_model",
+        output=[response_output_message],
+        status="completed",
+        text=OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")),
+        input=[input_item_message],
+        messages=[
+            OpenAIUserMessageParam(content="Name some towns in Ireland"),
+            OpenAIAssistantMessageParam(content="Galway, Longford, Sligo"),
+        ],
+        instructions="You are a helpful assistant.",
+    )
+    mock_responses_store.get_response_object.return_value = response
+
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+    instructions = "You are a geography expert. Provide concise answers."
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    # Execute
+    await openai_responses_impl.create_openai_response(
+        input="Which is the largest?", model=model, instructions=instructions, previous_response_id="123"
+    )
+
+    # Verify
+    mock_inference_api.openai_chat_completion.assert_called_once()
+    call_args = mock_inference_api.openai_chat_completion.call_args
+    params = call_args.args[0]
+    sent_messages = params.messages
+
+    # Check that instructions were prepended as a system message
+    # and that the previous response instructions were not carried over
+    assert len(sent_messages) == 4, sent_messages
+    assert sent_messages[0].role == "system"
+    assert sent_messages[0].content == instructions
+
+    # Check the rest of the messages were converted correctly
+    assert sent_messages[1].role == "user"
+    assert sent_messages[1].content == "Name some towns in Ireland"
+    assert sent_messages[2].role == "assistant"
+    assert sent_messages[2].content == "Galway, Longford, Sligo"
+    assert sent_messages[3].role == "user"
+    assert sent_messages[3].content == "Which is the largest?"
+
+
+async def test_list_openai_response_input_items_delegation(openai_responses_impl, mock_responses_store):
+    """Test that list_openai_response_input_items properly delegates to responses_store with correct parameters."""
+    # Setup
+    response_id = "resp_123"
+    after = "msg_after"
+    before = "msg_before"
+    include = ["metadata"]
+    limit = 5
+    order = Order.asc
+
+    input_message = OpenAIResponseMessage(
+        id="msg_123",
+        content="Test message",
+        role="user",
+    )
+
+    expected_result = ListOpenAIResponseInputItem(data=[input_message])
+    mock_responses_store.list_response_input_items.return_value = expected_result
+
+    # Execute with all parameters to test delegation
+    result = await openai_responses_impl.list_openai_response_input_items(
+        response_id, after=after, before=before, include=include, limit=limit, order=order
+    )
+
+    # Verify all parameters are passed through correctly to the store,
+    # positionally, in the store's expected argument order
+    mock_responses_store.list_response_input_items.assert_called_once_with(
+        response_id, after, before, include, limit, order
+    )
+
+    # Verify the result is returned as-is from the store
+    assert result.object == "list"
+    assert len(result.data) == 1
+    assert result.data[0].id == "msg_123"
+
+
+async def test_responses_store_list_input_items_logic():
+    """Test ResponsesStore list_response_input_items logic - mocks get_response_object to test actual ordering/limiting."""
+
+    # Create mock store and response store
+    mock_sql_store = AsyncMock()
+    backend_name = "sql_responses_test"
+    register_sqlstore_backends({backend_name: SqliteSqlStoreConfig(db_path="mock_db_path")})
+    responses_store = ResponsesStore(
+        ResponsesStoreReference(backend=backend_name, table_name="responses"), policy=default_policy()
+    )
+    # Swap in the mock so no real database is touched
+    responses_store.sql_store = mock_sql_store
+
+    # Setup test data - multiple input items
+    input_items = [
+        OpenAIResponseMessage(id="msg_1", content="First message", role="user"),
+        OpenAIResponseMessage(id="msg_2", content="Second message", role="user"),
+        OpenAIResponseMessage(id="msg_3", content="Third message", role="user"),
+        OpenAIResponseMessage(id="msg_4", content="Fourth message", role="user"),
+    ]
+
+    response_with_input = _OpenAIResponseObjectWithInputAndMessages(
+        id="resp_123",
+        model="test_model",
+        created_at=1234567890,
+        object="response",
+        status="completed",
+        output=[],
+        text=OpenAIResponseText(format=(OpenAIResponseTextFormat(type="text"))),
+        input=input_items,
+        messages=[OpenAIUserMessageParam(content="First message")],
+    )
+
+    # Stub the underlying sql_store fetch so get_response_object returns our test data
+    mock_sql_store.fetch_one.return_value = {"response_object": response_with_input.model_dump()}
+
+    # Test 1: Default behavior (no limit, desc order)
+    result = await responses_store.list_response_input_items("resp_123")
+    assert result.object == "list"
+    assert len(result.data) == 4
+    # Should be reversed for desc order
+    assert result.data[0].id == "msg_4"
+    assert result.data[1].id == "msg_3"
+    assert result.data[2].id == "msg_2"
+    assert result.data[3].id == "msg_1"
+
+    # Test 2: With limit=2, desc order
+    result = await responses_store.list_response_input_items("resp_123", limit=2, order=Order.desc)
+    assert result.object == "list"
+    assert len(result.data) == 2
+    # Should be first 2 items in desc order
+    assert result.data[0].id == "msg_4"
+    assert result.data[1].id == "msg_3"
+
+    # Test 3: With limit=2, asc order
+    result = await responses_store.list_response_input_items("resp_123", limit=2, order=Order.asc)
+    assert result.object == "list"
+    assert len(result.data) == 2
+    # Should be first 2 items in original order (asc)
+    assert result.data[0].id == "msg_1"
+    assert result.data[1].id == "msg_2"
+
+    # Test 4: Asc order without limit
+    result = await responses_store.list_response_input_items("resp_123", order=Order.asc)
+    assert result.object == "list"
+    assert len(result.data) == 4
+    # Should be in original order (asc)
+    assert result.data[0].id == "msg_1"
+    assert result.data[1].id == "msg_2"
+    assert result.data[2].id == "msg_3"
+    assert result.data[3].id == "msg_4"
+
+    # Test 5: Large limit (larger than available items)
+    result = await responses_store.list_response_input_items("resp_123", limit=10, order=Order.desc)
+    assert result.object == "list"
+    assert len(result.data) == 4  # Should return all available items
+    assert result.data[0].id == "msg_4"
+
+    # Test 6: Zero limit edge case
+    result = await responses_store.list_response_input_items("resp_123", limit=0, order=Order.asc)
+    assert result.object == "list"
+    assert len(result.data) == 0  # Should return no items
+
+
+async def test_store_response_uses_rehydrated_input_with_previous_response(
+    openai_responses_impl, mock_responses_store, mock_inference_api
+):
+    """Test that _store_response uses the full re-hydrated input (including previous responses)
+    rather than just the original input when previous_response_id is provided."""
+
+    # Setup - Create a previous response that should be included in the stored input
+    previous_response = _OpenAIResponseObjectWithInputAndMessages(
+        id="resp-previous-123",
+        object="response",
+        created_at=1234567890,
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        status="completed",
+        text=OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")),
+        input=[
+            OpenAIResponseMessage(
+                id="msg-prev-user", role="user", content=[OpenAIResponseInputMessageContentText(text="What is 2+2?")]
+            )
+        ],
+        output=[
+            OpenAIResponseMessage(
+                id="msg-prev-assistant",
+                role="assistant",
+                content=[OpenAIResponseOutputMessageContentOutputText(text="2+2 equals 4.")],
+            )
+        ],
+        messages=[
+            OpenAIUserMessageParam(content="What is 2+2?"),
+            OpenAIAssistantMessageParam(content="2+2 equals 4."),
+        ],
+    )
+
+    mock_responses_store.get_response_object.return_value = previous_response
+
+    current_input = "Now what is 3+3?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    # Execute - Create response with previous_response_id
+    result = await openai_responses_impl.create_openai_response(
+        input=current_input,
+        model=model,
+        previous_response_id="resp-previous-123",
+        store=True,
+    )
+
+    # Inspect what was handed to the store (kwargs of store_response_object)
+    store_call_args = mock_responses_store.store_response_object.call_args
+    stored_input = store_call_args.kwargs["input"]
+
+    # Verify that the stored input contains the full re-hydrated conversation:
+    # 1. Previous user message
+    # 2. Previous assistant response
+    # 3. Current user message
+    assert len(stored_input) == 3
+
+    assert stored_input[0].role == "user"
+    assert stored_input[0].content[0].text == "What is 2+2?"
+
+    assert stored_input[1].role == "assistant"
+    assert stored_input[1].content[0].text == "2+2 equals 4."
+
+    assert stored_input[2].role == "user"
+    assert stored_input[2].content == "Now what is 3+3?"
+
+    # Verify the response itself is correct
+    assert result.model == model
+    assert result.status == "completed"
+
+
+@patch("llama_stack.providers.utils.tools.mcp.list_mcp_tools")
+async def test_reuse_mcp_tool_list(
+    mock_list_mcp_tools, openai_responses_impl, mock_responses_store, mock_inference_api
+):
+    """Test that mcp_list_tools can be reused where appropriate."""
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+    mock_list_mcp_tools.return_value = ListToolDefsResponse(
+        data=[ToolDef(name="test_tool", description="a test tool", input_schema={}, output_schema={})]
+    )
+
+    # First call: lists tools from the MCP server ("alabel") for the first time
+    res1 = await openai_responses_impl.create_openai_response(
+        input="What is 2+2?",
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        store=True,
+        tools=[
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ],
+    )
+    # Round-trip the stored response through the mock store so the second call
+    # can load it as the previous response (with its cached mcp_list_tools output)
+    args = mock_responses_store.store_response_object.call_args
+    data = args.kwargs["response_object"].model_dump()
+    data["input"] = [input_item.model_dump() for input_item in args.kwargs["input"]]
+    data["messages"] = [msg.model_dump() for msg in args.kwargs["messages"]]
+    stored = _OpenAIResponseObjectWithInputAndMessages(**data)
+    mock_responses_store.get_response_object.return_value = stored
+
+    res2 = await openai_responses_impl.create_openai_response(
+        previous_response_id=res1.id,
+        input="Now what is 3+3?",
+        model="meta-llama/Llama-3.1-8B-Instruct",
+        store=True,
+        tools=[
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ],
+    )
+    # The second inference call should still see the MCP tool definition
+    assert len(mock_inference_api.openai_chat_completion.call_args_list) == 2
+    second_call = mock_inference_api.openai_chat_completion.call_args_list[1]
+    second_params = second_call.args[0]
+    tools_seen = second_params.tools
+    assert len(tools_seen) == 1
+    assert tools_seen[0]["function"]["name"] == "test_tool"
+    assert tools_seen[0]["function"]["description"] == "a test tool"
+
+    # The MCP server must have been listed only once (reused, not re-fetched)
+    assert mock_list_mcp_tools.call_count == 1
+    listings = [obj for obj in res2.output if obj.type == "mcp_list_tools"]
+    assert len(listings) == 1
+    assert listings[0].server_label == "alabel"
+    assert len(listings[0].tools) == 1
+    assert listings[0].tools[0].name == "test_tool"
+
+
+@pytest.mark.parametrize(
+    "text_format, response_format",
+    [
+        # Each case maps a Responses `text` param to the expected chat-completions response_format
+        (OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")), None),
+        (
+            OpenAIResponseText(format=OpenAIResponseTextFormat(name="Test", schema={"foo": "bar"}, type="json_schema")),
+            OpenAIResponseFormatJSONSchema(json_schema=OpenAIJSONSchema(name="Test", schema={"foo": "bar"})),
+        ),
+        (OpenAIResponseText(format=OpenAIResponseTextFormat(type="json_object")), OpenAIResponseFormatJSONObject()),
+        # ensure text param with no format specified defaults to None
+        (OpenAIResponseText(format=None), None),
+        # ensure text param of None defaults to None
+        (None, None),
+    ],
+)
+async def test_create_openai_response_with_text_format(
+    openai_responses_impl, mock_inference_api, text_format, response_format
+):
+    """Test creating Responses with text formats."""
+    # Setup
+    input_text = "How hot it is in San Francisco today?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    # Execute
+    _result = await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        text=text_format,
+    )
+
+    # Verify the translated response_format reached the inference call
+    first_call = mock_inference_api.openai_chat_completion.call_args_list[0]
+    first_params = first_call.args[0]
+    assert first_params.messages[0].content == input_text
+    assert first_params.response_format == response_format
+
+
+async def test_create_openai_response_with_invalid_text_format(openai_responses_impl, mock_inference_api):
+    """Test creating an OpenAI response with an invalid text format."""
+    # Setup
+    input_text = "How hot it is in San Francisco today?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    # Execute - an unrecognized format type should be rejected before any inference call
+    with pytest.raises(ValueError):
+        _result = await openai_responses_impl.create_openai_response(
+            input=input_text,
+            model=model,
+            text=OpenAIResponseText(format={"type": "invalid"}),
+        )
+
+
+async def test_create_openai_response_with_output_types_as_input(
+    openai_responses_impl, mock_inference_api, mock_responses_store
+):
+    """Test that response outputs can be used as inputs in multi-turn conversations.
+
+    Before adding OpenAIResponseOutput types to OpenAIResponseInput,
+    creating a _OpenAIResponseObjectWithInputAndMessages with some output types
+    in the input field would fail with a Pydantic ValidationError.
+
+    This test simulates storing a response where the input contains output message
+    types (MCP calls, function calls), which happens in multi-turn conversations.
+    """
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+
+    # Mock the inference response
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    # Create a response with store=True to trigger the storage path
+    result = await openai_responses_impl.create_openai_response(
+        input="What's the weather?",
+        model=model,
+        stream=True,
+        temperature=0.1,
+        store=True,
+    )
+
+    # Consume the stream (storage happens as a side effect of streaming completion)
+    _ = [chunk async for chunk in result]
+
+    # Verify store was called
+    assert mock_responses_store.store_response_object.called
+
+    # Get the stored data
+    store_call_args = mock_responses_store.store_response_object.call_args
+    stored_response = store_call_args.kwargs["response_object"]
+
+    # Now simulate a multi-turn conversation where outputs become inputs
+    input_with_output_types = [
+        OpenAIResponseMessage(role="user", content="What's the weather?", name=None),
+        # These output types need to be valid OpenAIResponseInput
+        OpenAIResponseOutputMessageFunctionToolCall(
+            call_id="call_123",
+            name="get_weather",
+            arguments='{"city": "Tokyo"}',
+            type="function_call",
+        ),
+        OpenAIResponseOutputMessageMCPCall(
+            id="mcp_456",
+            type="mcp_call",
+            server_label="weather_server",
+            name="get_temperature",
+            arguments='{"location": "Tokyo"}',
+            output="25°C",
+        ),
+    ]
+
+    # This simulates storing a response in a multi-turn conversation
+    # where previous outputs are included in the input.
+    stored_with_outputs = _OpenAIResponseObjectWithInputAndMessages(
+        id=stored_response.id,
+        created_at=stored_response.created_at,
+        model=stored_response.model,
+        status=stored_response.status,
+        output=stored_response.output,
+        input=input_with_output_types,  # This will trigger Pydantic validation
+        messages=None,
+    )
+
+    # Construction succeeding (no ValidationError) is the regression check
+    assert stored_with_outputs.input == input_with_output_types
+    assert len(stored_with_outputs.input) == 3
+
+
+async def test_create_openai_response_with_prompt(openai_responses_impl, mock_inference_api, mock_prompts_api):
+    """Test creating an OpenAI response with a prompt."""
+    input_text = "What is the capital of Ireland?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="You are a helpful {{ area_name }} assistant at {{ company_name }}. Always provide accurate information.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["area_name", "company_name"],
+        is_default=True,
+    )
+
+    # The caller supplies string version "1"; the prompts API is expected to be
+    # queried with the integer version 1 (see assert_called_with below)
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "area_name": OpenAIResponseInputMessageContentText(text="geography"),
+            "company_name": OpenAIResponseInputMessageContentText(text="Dummy Company"),
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    result = await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        prompt=openai_response_prompt,
+    )
+
+    mock_prompts_api.get_prompt.assert_called_with(prompt_id, 1)
+    mock_inference_api.openai_chat_completion.assert_called()
+    call_args = mock_inference_api.openai_chat_completion.call_args
+    sent_messages = call_args.args[0].messages
+    assert len(sent_messages) == 2
+
+    # The rendered prompt (variables substituted) becomes the system message
+    system_messages = [msg for msg in sent_messages if msg.role == "system"]
+    assert len(system_messages) == 1
+    assert (
+        system_messages[0].content
+        == "You are a helpful geography assistant at Dummy Company. Always provide accurate information."
+    )
+
+    user_messages = [msg for msg in sent_messages if msg.role == "user"]
+    assert len(user_messages) == 1
+    assert user_messages[0].content == input_text
+
+    # The response echoes back the prompt reference as supplied
+    assert result.model == model
+    assert result.status == "completed"
+    assert isinstance(result.prompt, OpenAIResponsePrompt)
+    assert result.prompt.id == prompt_id
+    assert result.prompt.variables == openai_response_prompt.variables
+    assert result.prompt.version == "1"
+
+
+async def test_prepend_prompt_successful_without_variables(openai_responses_impl, mock_prompts_api, mock_inference_api):
+    """Test prepend_prompt function without variables."""
+    input_text = "What is the capital of Ireland?"
+    model = "meta-llama/Llama-3.1-8B-Instruct"
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="You are a helpful assistant. Always provide accurate information.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=[],
+        is_default=True,
+    )
+
+    openai_response_prompt = OpenAIResponsePrompt(id=prompt_id, version="1")
+
+    mock_prompts_api.get_prompt.return_value = prompt
+    mock_inference_api.openai_chat_completion.return_value = fake_stream()
+
+    await openai_responses_impl.create_openai_response(
+        input=input_text,
+        model=model,
+        prompt=openai_response_prompt,
+    )
+
+    # With no variables, the prompt text is used verbatim as the system message
+    mock_prompts_api.get_prompt.assert_called_with(prompt_id, 1)
+    mock_inference_api.openai_chat_completion.assert_called()
+    call_args = mock_inference_api.openai_chat_completion.call_args
+    sent_messages = call_args.args[0].messages
+    assert len(sent_messages) == 2
+    system_messages = [msg for msg in sent_messages if msg.role == "system"]
+    assert system_messages[0].content == "You are a helpful assistant. Always provide accurate information."
+
+
+async def test_prepend_prompt_invalid_variable(openai_responses_impl, mock_prompts_api):
+    """Test error handling in prepend_prompt function when prompt parameters contain invalid variables."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="You are a {{ role }} assistant.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["role"],  # Only "role" is valid
+        is_default=True,
+    )
+
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "role": OpenAIResponseInputMessageContentText(text="helpful"),
+            "company": OpenAIResponseInputMessageContentText(
+                text="Dummy Company"
+            ),  # company is not in prompt.variables
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="Test prompt")]
+
+    # Execute - should raise ValueError for invalid variable
+    with pytest.raises(ValueError, match="Variable company not found in prompt"):
+        await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    # Verify the prompt was still looked up before validation failed
+    mock_prompts_api.get_prompt.assert_called_once_with(prompt_id, 1)
+
+
+async def test_prepend_prompt_not_found(openai_responses_impl, mock_prompts_api):
+    """Test prepend_prompt function when prompt is not found."""
+    prompt_id = "pmpt_nonexistent"
+    openai_response_prompt = OpenAIResponsePrompt(id=prompt_id, version="1")
+
+    mock_prompts_api.get_prompt.return_value = None  # Prompt not found
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="Test prompt")]
+    initial_length = len(messages)
+
+    # Execute
+    result = await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    # Verify
+    mock_prompts_api.get_prompt.assert_called_once_with(prompt_id, 1)
+
+    # Should return None when prompt not found (silent no-op, not an error)
+    assert result is None
+
+    # Messages should not be modified
+    assert len(messages) == initial_length
+    assert messages[0].content == "Test prompt"
+
+
+async def test_prepend_prompt_variable_substitution(openai_responses_impl, mock_prompts_api):
+    """Test complex variable substitution with multiple occurrences and special characters in prepend_prompt function."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+
+    # Support all whitespace variations: {{name}}, {{ name }}, {{ name}}, {{name }}, etc.
+    prompt = Prompt(
+        prompt="Hello {{name}}! You are working at {{ company}}. Your role is {{role}} at {{company}}. Remember, {{ name }}, to be {{ tone }}.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["name", "company", "role", "tone"],
+        is_default=True,
+    )
+
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "name": OpenAIResponseInputMessageContentText(text="Alice"),
+            "company": OpenAIResponseInputMessageContentText(text="Dummy Company"),
+            "role": OpenAIResponseInputMessageContentText(text="AI Assistant"),
+            "tone": OpenAIResponseInputMessageContentText(text="professional"),
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="Test")]
+
+    # Execute - mutates `messages` in place by inserting the system message at index 0
+    await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    # Verify: every occurrence of every variable is substituted, regardless of spacing
+    assert len(messages) == 2
+    assert isinstance(messages[0], OpenAISystemMessageParam)
+    expected_content = "Hello Alice! You are working at Dummy Company. Your role is AI Assistant at Dummy Company. Remember, Alice, to be professional."
+    assert messages[0].content == expected_content
+
+
+async def test_prepend_prompt_with_image_variable(openai_responses_impl, mock_prompts_api, mock_files_api):
+    """Test prepend_prompt with image variable - should create placeholder in system message and append image as separate user message."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="Analyze this {{product_image}} and describe what you see.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["product_image"],
+        is_default=True,
+    )
+
+    # Mock file content and file metadata
+    # (anonymous object with a .body attribute mimics the files API response)
+    mock_file_content = b"fake_image_data"
+    mock_files_api.openai_retrieve_file_content.return_value = type("obj", (object,), {"body": mock_file_content})()
+    mock_files_api.openai_retrieve_file.return_value = OpenAIFileObject(
+        object="file",
+        id="file-abc123",
+        bytes=len(mock_file_content),
+        created_at=1234567890,
+        expires_at=1234567890,
+        filename="product.jpg",
+        purpose="assistants",
+    )
+
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "product_image": OpenAIResponseInputMessageContentImage(
+                file_id="file-abc123",
+                detail="high",
+            )
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="What do you think?")]
+
+    # Execute
+    await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    assert len(messages) == 3
+
+    # Check system message has placeholder
+    assert isinstance(messages[0], OpenAISystemMessageParam)
+    assert messages[0].content == "Analyze this [Image: product_image] and describe what you see."
+
+    # Check original user message is still there
+    assert isinstance(messages[1], OpenAIUserMessageParam)
+    assert messages[1].content == "What do you think?"
+
+    # Check new user message with image is appended
+    assert isinstance(messages[2], OpenAIUserMessageParam)
+    assert isinstance(messages[2].content, list)
+    assert len(messages[2].content) == 1
+
+    # Should be image with data URL (file bytes inlined as base64)
+    assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam)
+    assert messages[2].content[0].image_url.url.startswith("data:image/")
+    assert messages[2].content[0].image_url.detail == "high"
+
+
+async def test_prepend_prompt_with_file_variable(openai_responses_impl, mock_prompts_api, mock_files_api):
+    """Test prepend_prompt with file variable - should create placeholder in system message and append file as separate user message."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="Review the document {{contract_file}} and summarize key points.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["contract_file"],
+        is_default=True,
+    )
+
+    # Mock file retrieval
+    # (anonymous object with a .body attribute mimics the files API response)
+    mock_file_content = b"fake_pdf_content"
+    mock_files_api.openai_retrieve_file_content.return_value = type("obj", (object,), {"body": mock_file_content})()
+    mock_files_api.openai_retrieve_file.return_value = OpenAIFileObject(
+        object="file",
+        id="file-contract-789",
+        bytes=len(mock_file_content),
+        created_at=1234567890,
+        expires_at=1234567890,
+        filename="contract.pdf",
+        purpose="assistants",
+    )
+
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "contract_file": OpenAIResponseInputMessageContentFile(
+                file_id="file-contract-789",
+                filename="contract.pdf",
+            )
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="Please review this.")]
+
+    # Execute
+    await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    assert len(messages) == 3
+
+    # Check system message has placeholder
+    assert isinstance(messages[0], OpenAISystemMessageParam)
+    assert messages[0].content == "Review the document [File: contract_file] and summarize key points."
+
+    # Check original user message is still there
+    assert isinstance(messages[1], OpenAIUserMessageParam)
+    assert messages[1].content == "Please review this."
+
+    # Check new user message with file is appended
+    assert isinstance(messages[2], OpenAIUserMessageParam)
+    assert isinstance(messages[2].content, list)
+    assert len(messages[2].content) == 1
+
+    # First part should be file with data URL; file_id is dropped once inlined
+    assert isinstance(messages[2].content[0], OpenAIFile)
+    assert messages[2].content[0].file.file_data.startswith("data:application/pdf;base64,")
+    assert messages[2].content[0].file.filename == "contract.pdf"
+    assert messages[2].content[0].file.file_id is None
+
+
+async def test_prepend_prompt_with_mixed_variables(openai_responses_impl, mock_prompts_api, mock_files_api):
+    """Test prepend_prompt with text, image, and file variables mixed together."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="Hello {{name}}! Analyze {{photo}} and review {{document}}. Provide insights for {{company}}.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["name", "photo", "document", "company"],
+        is_default=True,
+    )
+
+    # Mock file retrieval for image and file
+    mock_image_content = b"fake_image_data"
+    mock_file_content = b"fake_doc_content"
+
+    # Route content/metadata lookups by file_id so both attachments resolve
+    async def mock_retrieve_file_content(file_id):
+        if file_id == "file-photo-123":
+            return type("obj", (object,), {"body": mock_image_content})()
+        elif file_id == "file-doc-456":
+            return type("obj", (object,), {"body": mock_file_content})()
+
+    mock_files_api.openai_retrieve_file_content.side_effect = mock_retrieve_file_content
+
+    def mock_retrieve_file(file_id):
+        if file_id == "file-photo-123":
+            return OpenAIFileObject(
+                object="file",
+                id="file-photo-123",
+                bytes=len(mock_image_content),
+                created_at=1234567890,
+                expires_at=1234567890,
+                filename="photo.jpg",
+                purpose="assistants",
+            )
+        elif file_id == "file-doc-456":
+            return OpenAIFileObject(
+                object="file",
+                id="file-doc-456",
+                bytes=len(mock_file_content),
+                created_at=1234567890,
+                expires_at=1234567890,
+                filename="doc.pdf",
+                purpose="assistants",
+            )
+
+    mock_files_api.openai_retrieve_file.side_effect = mock_retrieve_file
+
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "name": OpenAIResponseInputMessageContentText(text="Alice"),
+            "photo": OpenAIResponseInputMessageContentImage(file_id="file-photo-123", detail="auto"),
+            "document": OpenAIResponseInputMessageContentFile(file_id="file-doc-456", filename="doc.pdf"),
+            "company": OpenAIResponseInputMessageContentText(text="Acme Corp"),
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="Here's my question.")]
+
+    # Execute
+    await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    assert len(messages) == 3
+
+    # Check system message has text and placeholders
+    # (text vars substituted inline; media vars become placeholders)
+    assert isinstance(messages[0], OpenAISystemMessageParam)
+    expected_system = "Hello Alice! Analyze [Image: photo] and review [File: document]. Provide insights for Acme Corp."
+    assert messages[0].content == expected_system
+
+    # Check original user message is still there
+    assert isinstance(messages[1], OpenAIUserMessageParam)
+    assert messages[1].content == "Here's my question."
+
+    # Check new user message with media is appended (2 media items)
+    assert isinstance(messages[2], OpenAIUserMessageParam)
+    assert isinstance(messages[2].content, list)
+    assert len(messages[2].content) == 2
+
+    # First part should be image with data URL
+    assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam)
+    assert messages[2].content[0].image_url.url.startswith("data:image/")
+
+    # Second part should be file with data URL
+    assert isinstance(messages[2].content[1], OpenAIFile)
+    assert messages[2].content[1].file.file_data.startswith("data:application/pdf;base64,")
+    assert messages[2].content[1].file.filename == "doc.pdf"
+    assert messages[2].content[1].file.file_id is None
+
+
+async def test_prepend_prompt_with_image_using_image_url(openai_responses_impl, mock_prompts_api):
+    """Test prepend_prompt with image variable using image_url instead of file_id."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="Describe {{screenshot}}.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["screenshot"],
+        is_default=True,
+    )
+
+    # No files API mocking needed: image_url images require no file retrieval
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={
+            "screenshot": OpenAIResponseInputMessageContentImage(
+                image_url="https://example.com/screenshot.png",
+                detail="low",
+            )
+        },
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+
+    # Initial messages
+    messages = [OpenAIUserMessageParam(content="What is this?")]
+
+    # Execute
+    await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
+
+    assert len(messages) == 3
+
+    # Check system message has placeholder
+    assert isinstance(messages[0], OpenAISystemMessageParam)
+    assert messages[0].content == "Describe [Image: screenshot]."
+
+    # Check original user message is still there
+    assert isinstance(messages[1], OpenAIUserMessageParam)
+    assert messages[1].content == "What is this?"
+
+    # Check new user message with image is appended
+    assert isinstance(messages[2], OpenAIUserMessageParam)
+    assert isinstance(messages[2].content, list)
+
+    # Image should use the provided URL as-is (not converted to a data URL)
+    assert isinstance(messages[2].content[0], OpenAIChatCompletionContentPartImageParam)
+    assert messages[2].content[0].image_url.url == "https://example.com/screenshot.png"
+    assert messages[2].content[0].image_url.detail == "low"
+
+
+async def test_prepend_prompt_image_variable_missing_required_fields(openai_responses_impl, mock_prompts_api):
+    """Test prepend_prompt with image variable that has neither file_id nor image_url - should raise error."""
+    prompt_id = "pmpt_1234567890abcdef1234567890abcdef1234567890abcdef"
+    prompt = Prompt(
+        prompt="Analyze {{bad_image}}.",
+        prompt_id=prompt_id,
+        version=1,
+        variables=["bad_image"],
+        is_default=True,
+    )
+
+    # Create image content with neither file_id nor image_url
+    openai_response_prompt = OpenAIResponsePrompt(
+        id=prompt_id,
+        version="1",
+        variables={"bad_image": OpenAIResponseInputMessageContentImage()},  # No file_id or image_url
+    )
+
+    mock_prompts_api.get_prompt.return_value = prompt
+    messages = [OpenAIUserMessageParam(content="Test")]
+
+    # Execute - should raise ValueError with a message naming both accepted fields
+    with pytest.raises(ValueError, match="Image content must have either 'image_url' or 'file_id'"):
+        await openai_responses_impl._prepend_prompt(messages, openai_response_prompt)
diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py
new file mode 100644
index 000000000..5a3e6bf21
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_openai_responses_conversations.py
@@ -0,0 +1,253 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+import pytest
+
+from llama_stack_api.common.errors import (
+ ConversationNotFoundError,
+ InvalidConversationIdError,
+)
+from llama_stack_api.conversations import (
+ ConversationItemList,
+)
+from llama_stack_api.openai_responses import (
+ OpenAIResponseMessage,
+ OpenAIResponseObject,
+ OpenAIResponseObjectStreamResponseCompleted,
+ OpenAIResponseObjectStreamResponseOutputItemDone,
+ OpenAIResponseOutputMessageContentOutputText,
+)
+
+# Import existing fixtures from the main responses test file
+pytest_plugins = ["tests.unit.providers.agents.meta_reference.test_openai_responses"]
+
+from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+)
+
+
+@pytest.fixture
+def responses_impl_with_conversations(
+    mock_inference_api,
+    mock_tool_groups_api,
+    mock_tool_runtime_api,
+    mock_responses_store,
+    mock_vector_io_api,
+    mock_conversations_api,
+    mock_safety_api,
+    mock_prompts_api,
+    mock_files_api,
+):
+    """Create OpenAIResponsesImpl instance with conversations API."""
+    return OpenAIResponsesImpl(  # mock_* fixtures are provided by the pytest_plugins module imported above
+        inference_api=mock_inference_api,
+        tool_groups_api=mock_tool_groups_api,
+        tool_runtime_api=mock_tool_runtime_api,
+        responses_store=mock_responses_store,
+        vector_io_api=mock_vector_io_api,
+        conversations_api=mock_conversations_api,
+        safety_api=mock_safety_api,
+        prompts_api=mock_prompts_api,
+        files_api=mock_files_api,
+    )
+
+
+class TestConversationValidation:
+    """Test conversation ID validation logic."""
+
+    async def test_nonexistent_conversation_raises_error(
+        self, responses_impl_with_conversations, mock_conversations_api
+    ):
+        """Test that ConversationNotFoundError is raised for non-existent conversation."""
+        conv_id = "conv_nonexistent"
+
+        # Mock conversation not found
+        mock_conversations_api.list_items.side_effect = ConversationNotFoundError("conv_nonexistent")  # existence is checked via list_items
+
+        with pytest.raises(ConversationNotFoundError):
+            await responses_impl_with_conversations.create_openai_response(
+                input="Hello", model="test-model", conversation=conv_id, stream=False
+            )
+
+
+class TestMessageSyncing:
+    """Test message syncing to conversations."""
+
+    async def test_sync_response_to_conversation_simple(
+        self, responses_impl_with_conversations, mock_conversations_api
+    ):
+        """Test syncing simple response to conversation."""
+        conv_id = "conv_test123"
+        input_text = "What are the 5 Ds of dodgeball?"
+
+        # Output items (what the model generated)
+        output_items = [
+            OpenAIResponseMessage(
+                id="msg_response",
+                content=[
+                    OpenAIResponseOutputMessageContentOutputText(
+                        text="The 5 Ds are: Dodge, Duck, Dip, Dive, and Dodge.", type="output_text", annotations=[]
+                    )
+                ],
+                role="assistant",
+                status="completed",
+                type="message",
+            )
+        ]
+
+        await responses_impl_with_conversations._sync_response_to_conversation(conv_id, input_text, output_items)
+
+        # should call add_items with user input and assistant response
+        mock_conversations_api.add_items.assert_called_once()
+        call_args = mock_conversations_api.add_items.call_args
+
+        assert call_args[0][0] == conv_id  # conversation_id
+        items = call_args[0][1]  # conversation_items
+
+        assert len(items) == 2
+        # User message
+        assert items[0].type == "message"
+        assert items[0].role == "user"
+        assert items[0].content[0].type == "input_text"
+        assert items[0].content[0].text == input_text
+
+        # Assistant message
+        assert items[1].type == "message"
+        assert items[1].role == "assistant"
+
+    async def test_sync_response_to_conversation_api_error(
+        self, responses_impl_with_conversations, mock_conversations_api
+    ):
+        mock_conversations_api.add_items.side_effect = Exception("API Error")  # simulate conversation store failure
+        output_items = []
+
+        # matching the behavior of OpenAI here: sync failures propagate to the caller, not swallowed
+        with pytest.raises(Exception, match="API Error"):
+            await responses_impl_with_conversations._sync_response_to_conversation(
+                "conv_test123", "Hello", output_items
+            )
+
+    async def test_sync_with_list_input(self, responses_impl_with_conversations, mock_conversations_api):
+        """Test syncing with list of input messages."""
+        conv_id = "conv_test123"
+        input_messages = [
+            OpenAIResponseMessage(role="user", content=[{"type": "input_text", "text": "First message"}]),
+        ]
+        output_items = [
+            OpenAIResponseMessage(
+                id="msg_response",
+                content=[OpenAIResponseOutputMessageContentOutputText(text="Response", type="output_text")],
+                role="assistant",
+                status="completed",
+                type="message",
+            )
+        ]
+
+        await responses_impl_with_conversations._sync_response_to_conversation(conv_id, input_messages, output_items)
+
+        mock_conversations_api.add_items.assert_called_once()
+        call_args = mock_conversations_api.add_items.call_args
+
+        items = call_args[0][1]
+        # Should have input message + output message
+        assert len(items) == 2
+
+
+class TestIntegrationWorkflow:
+    """Integration tests for the full conversation workflow."""
+
+    async def test_create_response_with_valid_conversation(
+        self, responses_impl_with_conversations, mock_conversations_api
+    ):
+        """Test creating a response with a valid conversation parameter."""
+        mock_conversations_api.list_items.return_value = ConversationItemList(  # empty conversation => validation passes
+            data=[], first_id=None, has_more=False, last_id=None, object="list"
+        )
+
+        async def mock_streaming_response(*args, **kwargs):
+            # Stand-in for the real streaming pipeline; bypasses inference entirely.
+            message_item = OpenAIResponseMessage(
+                id="msg_response",
+                content=[
+                    OpenAIResponseOutputMessageContentOutputText(
+                        text="Test response", type="output_text", annotations=[]
+                    )
+                ],
+                role="assistant",
+                status="completed",
+                type="message",
+            )
+
+            # Emit output_item.done event first (needed for conversation sync)
+            yield OpenAIResponseObjectStreamResponseOutputItemDone(
+                response_id="resp_test123",
+                item=message_item,
+                output_index=0,
+                sequence_number=1,
+                type="response.output_item.done",
+            )
+
+            # Then emit response.completed
+            mock_response = OpenAIResponseObject(
+                id="resp_test123",
+                created_at=1234567890,
+                model="test-model",
+                object="response",
+                output=[message_item],
+                status="completed",
+            )
+
+            yield OpenAIResponseObjectStreamResponseCompleted(response=mock_response, type="response.completed")
+
+        responses_impl_with_conversations._create_streaming_response = mock_streaming_response
+
+        input_text = "Hello, how are you?"
+        conversation_id = "conv_test123"
+
+        response = await responses_impl_with_conversations.create_openai_response(
+            input=input_text, model="test-model", conversation=conversation_id, stream=False
+        )
+
+        assert response is not None
+        assert response.id == "resp_test123"
+
+        # Note: conversation sync happens inside _create_streaming_response,
+        # which we're mocking here, so we can't test it in this unit test.
+        # The sync logic is tested separately in TestMessageSyncing.
+
+    async def test_create_response_with_invalid_conversation_id(self, responses_impl_with_conversations):
+        """Test creating a response with an invalid conversation ID."""
+        with pytest.raises(InvalidConversationIdError) as exc_info:
+            await responses_impl_with_conversations.create_openai_response(
+                input="Hello", model="test-model", conversation="invalid_id", stream=False
+            )
+
+        assert "Expected an ID that begins with 'conv_'" in str(exc_info.value)
+
+    async def test_create_response_with_nonexistent_conversation(
+        self, responses_impl_with_conversations, mock_conversations_api
+    ):
+        """Test creating a response with a non-existent conversation."""
+        mock_conversations_api.list_items.side_effect = ConversationNotFoundError("conv_nonexistent")
+
+        with pytest.raises(ConversationNotFoundError) as exc_info:
+            await responses_impl_with_conversations.create_openai_response(
+                input="Hello", model="test-model", conversation="conv_nonexistent", stream=False
+            )
+
+        assert "not found" in str(exc_info.value)
+
+    async def test_conversation_and_previous_response_id(
+        self, responses_impl_with_conversations, mock_conversations_api, mock_responses_store
+    ):
+        # conversation and previous_response_id are mutually exclusive ways to supply history
+        with pytest.raises(ValueError) as exc_info:
+            await responses_impl_with_conversations.create_openai_response(
+                input="test", model="test", conversation="conv_123", previous_response_id="resp_123"
+            )
+
+        assert "Mutually exclusive parameters" in str(exc_info.value)
+        assert "previous_response_id" in str(exc_info.value)
+        assert "conversation" in str(exc_info.value)
diff --git a/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py b/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py
new file mode 100644
index 000000000..e496a96e3
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_response_conversion_utils.py
@@ -0,0 +1,375 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from unittest.mock import AsyncMock
+
+import pytest
+
+from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
+ _extract_citations_from_text,
+ convert_chat_choice_to_response_message,
+ convert_response_content_to_chat_content,
+ convert_response_input_to_chat_messages,
+ convert_response_text_to_chat_response_format,
+ get_message_type_by_role,
+ is_function_tool_call,
+)
+from llama_stack_api.inference import (
+ OpenAIAssistantMessageParam,
+ OpenAIChatCompletionContentPartImageParam,
+ OpenAIChatCompletionContentPartTextParam,
+ OpenAIChatCompletionToolCall,
+ OpenAIChatCompletionToolCallFunction,
+ OpenAIChoice,
+ OpenAIDeveloperMessageParam,
+ OpenAIResponseFormatJSONObject,
+ OpenAIResponseFormatJSONSchema,
+ OpenAIResponseFormatText,
+ OpenAISystemMessageParam,
+ OpenAIToolMessageParam,
+ OpenAIUserMessageParam,
+)
+from llama_stack_api.openai_responses import (
+ OpenAIResponseAnnotationFileCitation,
+ OpenAIResponseInputFunctionToolCallOutput,
+ OpenAIResponseInputMessageContentImage,
+ OpenAIResponseInputMessageContentText,
+ OpenAIResponseInputToolFunction,
+ OpenAIResponseInputToolWebSearch,
+ OpenAIResponseMessage,
+ OpenAIResponseOutputMessageContentOutputText,
+ OpenAIResponseOutputMessageFunctionToolCall,
+ OpenAIResponseText,
+ OpenAIResponseTextFormat,
+)
+
+
+@pytest.fixture
+def mock_files_api():
+    """Mock files API for testing."""
+    return AsyncMock()  # any awaited method returns a fresh AsyncMock result
+
+
+class TestConvertChatChoiceToResponseMessage:  # covers convert_chat_choice_to_response_message
+    async def test_convert_string_content(self):
+        # Plain string assistant content converts to a single output_text part.
+        choice = OpenAIChoice(
+            message=OpenAIAssistantMessageParam(content="Test message"),
+            finish_reason="stop",
+            index=0,
+        )
+
+        result = await convert_chat_choice_to_response_message(choice)
+
+        assert result.role == "assistant"
+        assert result.status == "completed"
+        assert len(result.content) == 1
+        assert isinstance(result.content[0], OpenAIResponseOutputMessageContentOutputText)
+        assert result.content[0].text == "Test message"
+
+    async def test_convert_text_param_content(self):
+        # List-of-parts assistant content is not yet supported and must raise.
+        choice = OpenAIChoice(
+            message=OpenAIAssistantMessageParam(
+                content=[OpenAIChatCompletionContentPartTextParam(text="Test text param")]
+            ),
+            finish_reason="stop",
+            index=0,
+        )
+
+        with pytest.raises(ValueError) as exc_info:
+            await convert_chat_choice_to_response_message(choice)
+
+        assert "does not yet support output content type" in str(exc_info.value)
+
+
+class TestConvertResponseContentToChatContent:  # covers convert_response_content_to_chat_content
+    async def test_convert_string_content(self, mock_files_api):
+        # Plain strings pass through untouched.
+        result = await convert_response_content_to_chat_content("Simple string", mock_files_api)
+        assert result == "Simple string"
+
+    async def test_convert_text_content_parts(self, mock_files_api):
+        # Both input-text and output-text parts map to chat text parts, order preserved.
+        content = [
+            OpenAIResponseInputMessageContentText(text="First part"),
+            OpenAIResponseOutputMessageContentOutputText(text="Second part"),
+        ]
+
+        result = await convert_response_content_to_chat_content(content, mock_files_api)
+
+        assert len(result) == 2
+        assert isinstance(result[0], OpenAIChatCompletionContentPartTextParam)
+        assert result[0].text == "First part"
+        assert isinstance(result[1], OpenAIChatCompletionContentPartTextParam)
+        assert result[1].text == "Second part"
+
+    async def test_convert_image_content(self, mock_files_api):
+        # URL-based images keep their URL and detail; files API is not consulted.
+        content = [OpenAIResponseInputMessageContentImage(image_url="https://example.com/image.jpg", detail="high")]
+
+        result = await convert_response_content_to_chat_content(content, mock_files_api)
+
+        assert len(result) == 1
+        assert isinstance(result[0], OpenAIChatCompletionContentPartImageParam)
+        assert result[0].image_url.url == "https://example.com/image.jpg"
+        assert result[0].image_url.detail == "high"
+
+
+class TestConvertResponseInputToChatMessages:  # covers convert_response_input_to_chat_messages
+    async def test_convert_string_input(self):
+        # Bare string input becomes a single user message.
+        result = await convert_response_input_to_chat_messages("User message")
+
+        assert len(result) == 1
+        assert isinstance(result[0], OpenAIUserMessageParam)
+        assert result[0].content == "User message"
+
+    async def test_convert_function_tool_call_output(self):
+        # A call plus its output becomes an assistant tool_calls message followed by a tool message.
+        input_items = [
+            OpenAIResponseOutputMessageFunctionToolCall(
+                call_id="call_123",
+                name="test_function",
+                arguments='{"param": "value"}',
+            ),
+            OpenAIResponseInputFunctionToolCallOutput(
+                output="Tool output",
+                call_id="call_123",
+            ),
+        ]
+
+        result = await convert_response_input_to_chat_messages(input_items)
+
+        assert len(result) == 2
+        assert isinstance(result[0], OpenAIAssistantMessageParam)
+        assert result[0].tool_calls[0].id == "call_123"
+        assert result[0].tool_calls[0].function.name == "test_function"
+        assert result[0].tool_calls[0].function.arguments == '{"param": "value"}'
+        assert isinstance(result[1], OpenAIToolMessageParam)
+        assert result[1].content == "Tool output"
+        assert result[1].tool_call_id == "call_123"
+
+    async def test_convert_function_tool_call(self):
+        # A call with no recorded output still produces an assistant tool_calls message.
+        input_items = [
+            OpenAIResponseOutputMessageFunctionToolCall(
+                call_id="call_456",
+                name="test_function",
+                arguments='{"param": "value"}',
+            )
+        ]
+
+        result = await convert_response_input_to_chat_messages(input_items)
+
+        assert len(result) == 1
+        assert isinstance(result[0], OpenAIAssistantMessageParam)
+        assert len(result[0].tool_calls) == 1
+        assert result[0].tool_calls[0].id == "call_456"
+        assert result[0].tool_calls[0].function.name == "test_function"
+        assert result[0].tool_calls[0].function.arguments == '{"param": "value"}'
+
+    async def test_convert_function_call_ordering(self):
+        # Each call must be immediately followed by its matching output, even when the
+        # source items list all calls first and all outputs afterwards.
+        input_items = [
+            OpenAIResponseOutputMessageFunctionToolCall(
+                call_id="call_123",
+                name="test_function_a",
+                arguments='{"param": "value"}',
+            ),
+            OpenAIResponseOutputMessageFunctionToolCall(
+                call_id="call_456",
+                name="test_function_b",
+                arguments='{"param": "value"}',
+            ),
+            OpenAIResponseInputFunctionToolCallOutput(
+                output="AAA",
+                call_id="call_123",
+            ),
+            OpenAIResponseInputFunctionToolCallOutput(
+                output="BBB",
+                call_id="call_456",
+            ),
+        ]
+
+        result = await convert_response_input_to_chat_messages(input_items)
+        assert len(result) == 4
+        assert isinstance(result[0], OpenAIAssistantMessageParam)
+        assert len(result[0].tool_calls) == 1
+        assert result[0].tool_calls[0].id == "call_123"
+        assert result[0].tool_calls[0].function.name == "test_function_a"
+        assert result[0].tool_calls[0].function.arguments == '{"param": "value"}'
+        assert isinstance(result[1], OpenAIToolMessageParam)
+        assert result[1].content == "AAA"
+        assert result[1].tool_call_id == "call_123"
+        assert isinstance(result[2], OpenAIAssistantMessageParam)
+        assert len(result[2].tool_calls) == 1
+        assert result[2].tool_calls[0].id == "call_456"
+        assert result[2].tool_calls[0].function.name == "test_function_b"
+        assert result[2].tool_calls[0].function.arguments == '{"param": "value"}'
+        assert isinstance(result[3], OpenAIToolMessageParam)
+        assert result[3].content == "BBB"
+        assert result[3].tool_call_id == "call_456"
+
+    async def test_convert_response_message(self):
+        input_items = [
+            OpenAIResponseMessage(
+                role="user",
+                content=[OpenAIResponseInputMessageContentText(text="User text")],
+            )
+        ]
+
+        result = await convert_response_input_to_chat_messages(input_items)
+
+        assert len(result) == 1
+        assert isinstance(result[0], OpenAIUserMessageParam)
+        # Content should be converted to chat content format
+        assert len(result[0].content) == 1
+        assert result[0].content[0].text == "User text"
+
+
+class TestConvertResponseTextToChatResponseFormat:  # covers convert_response_text_to_chat_response_format
+    async def test_convert_text_format(self):
+        text = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text"))
+        result = await convert_response_text_to_chat_response_format(text)
+
+        assert isinstance(result, OpenAIResponseFormatText)
+        assert result.type == "text"
+
+    async def test_convert_json_object_format(self):
+        text = OpenAIResponseText(format={"type": "json_object"})  # dict form is accepted too
+        result = await convert_response_text_to_chat_response_format(text)
+
+        assert isinstance(result, OpenAIResponseFormatJSONObject)
+
+    async def test_convert_json_schema_format(self):
+        # name/schema must be carried into the chat-completions json_schema payload.
+        schema_def = {"type": "object", "properties": {"test": {"type": "string"}}}
+        text = OpenAIResponseText(
+            format={
+                "type": "json_schema",
+                "name": "test_schema",
+                "schema": schema_def,
+            }
+        )
+        result = await convert_response_text_to_chat_response_format(text)
+
+        assert isinstance(result, OpenAIResponseFormatJSONSchema)
+        assert result.json_schema["name"] == "test_schema"
+        assert result.json_schema["schema"] == schema_def
+
+    async def test_default_text_format(self):
+        # No explicit format defaults to plain text.
+        text = OpenAIResponseText()
+        result = await convert_response_text_to_chat_response_format(text)
+
+        assert isinstance(result, OpenAIResponseFormatText)
+        assert result.type == "text"
+
+
+class TestGetMessageTypeByRole:  # covers get_message_type_by_role role -> message-class mapping
+    async def test_user_role(self):
+        result = await get_message_type_by_role("user")
+        assert result == OpenAIUserMessageParam
+
+    async def test_system_role(self):
+        result = await get_message_type_by_role("system")
+        assert result == OpenAISystemMessageParam
+
+    async def test_assistant_role(self):
+        result = await get_message_type_by_role("assistant")
+        assert result == OpenAIAssistantMessageParam
+
+    async def test_developer_role(self):
+        result = await get_message_type_by_role("developer")
+        assert result == OpenAIDeveloperMessageParam
+
+    async def test_unknown_role(self):
+        result = await get_message_type_by_role("unknown")
+        assert result is None  # unknown roles map to None rather than raising
+
+
+class TestIsFunctionToolCall:  # covers is_function_tool_call matching logic
+    def test_is_function_tool_call_true(self):
+        # Name matches a declared function tool -> True.
+        tool_call = OpenAIChatCompletionToolCall(
+            index=0,
+            id="call_123",
+            function=OpenAIChatCompletionToolCallFunction(
+                name="test_function",
+                arguments="{}",
+            ),
+        )
+        tools = [
+            OpenAIResponseInputToolFunction(
+                type="function", name="test_function", parameters={"type": "object", "properties": {}}
+            ),
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+        ]
+
+        result = is_function_tool_call(tool_call, tools)
+        assert result is True
+
+    def test_is_function_tool_call_false_different_name(self):
+        # Name does not match any declared function tool -> False.
+        tool_call = OpenAIChatCompletionToolCall(
+            index=0,
+            id="call_123",
+            function=OpenAIChatCompletionToolCallFunction(
+                name="other_function",
+                arguments="{}",
+            ),
+        )
+        tools = [
+            OpenAIResponseInputToolFunction(
+                type="function", name="test_function", parameters={"type": "object", "properties": {}}
+            ),
+        ]
+
+        result = is_function_tool_call(tool_call, tools)
+        assert result is False
+
+    def test_is_function_tool_call_false_no_function(self):
+        # Tool call without a function payload can never match.
+        tool_call = OpenAIChatCompletionToolCall(
+            index=0,
+            id="call_123",
+            function=None,
+        )
+        tools = [
+            OpenAIResponseInputToolFunction(
+                type="function", name="test_function", parameters={"type": "object", "properties": {}}
+            ),
+        ]
+
+        result = is_function_tool_call(tool_call, tools)
+        assert result is False
+
+    def test_is_function_tool_call_false_wrong_type(self):
+        # Name collides with a non-function tool (web_search) -> still False.
+        tool_call = OpenAIChatCompletionToolCall(
+            index=0,
+            id="call_123",
+            function=OpenAIChatCompletionToolCallFunction(
+                name="web_search",
+                arguments="{}",
+            ),
+        )
+        tools = [
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+        ]
+
+        result = is_function_tool_call(tool_call, tools)
+        assert result is False
+
+
+class TestExtractCitationsFromText:  # covers _extract_citations_from_text marker parsing
+    def test_extract_citations_and_annotations(self):
+        # <|file-...|> markers are stripped; bracketed text that is not a marker is kept.
+        text = "Start [not-a-file]. New source <|file-abc123|>. "
+        text += "Other source <|file-def456|>? Repeat source <|file-abc123|>! No citation."
+        file_mapping = {"file-abc123": "doc1.pdf", "file-def456": "doc2.txt"}
+
+        annotations, cleaned_text = _extract_citations_from_text(text, file_mapping)
+
+        expected_annotations = [
+            OpenAIResponseAnnotationFileCitation(file_id="file-abc123", filename="doc1.pdf", index=30),
+            OpenAIResponseAnnotationFileCitation(file_id="file-def456", filename="doc2.txt", index=44),
+            OpenAIResponseAnnotationFileCitation(file_id="file-abc123", filename="doc1.pdf", index=59),
+        ]
+        expected_clean_text = "Start [not-a-file]. New source. Other source? Repeat source! No citation."
+
+        assert cleaned_text == expected_clean_text
+        assert annotations == expected_annotations
+        # OpenAI cites at the end of the sentence
+        assert cleaned_text[expected_annotations[0].index] == "."
+        assert cleaned_text[expected_annotations[1].index] == "?"
+        assert cleaned_text[expected_annotations[2].index] == "!"
diff --git a/tests/unit/providers/agents/meta_reference/test_response_tool_context.py b/tests/unit/providers/agents/meta_reference/test_response_tool_context.py
new file mode 100644
index 000000000..4054debd5
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_response_tool_context.py
@@ -0,0 +1,183 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+
+from llama_stack.providers.inline.agents.meta_reference.responses.types import ToolContext
+from llama_stack_api.openai_responses import (
+ MCPListToolsTool,
+ OpenAIResponseInputToolFileSearch,
+ OpenAIResponseInputToolFunction,
+ OpenAIResponseInputToolMCP,
+ OpenAIResponseInputToolWebSearch,
+ OpenAIResponseObject,
+ OpenAIResponseOutputMessageMCPListTools,
+ OpenAIResponseToolMCP,
+)
+
+
+class TestToolContext:  # exercises ToolContext.recover_tools_from_previous_response
+    def test_no_tools(self):
+        tools = []
+        context = ToolContext(tools)
+        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="mymodel", output=[], status="")
+        context.recover_tools_from_previous_response(previous_response)
+
+        assert len(context.tools_to_process) == 0
+        assert len(context.previous_tools) == 0
+        assert len(context.previous_tool_listings) == 0
+
+    def test_no_previous_tools(self):
+        # Previous response carries no tool listings, so nothing is reusable.
+        tools = [
+            OpenAIResponseInputToolFileSearch(vector_store_ids=["fake"]),
+            OpenAIResponseInputToolMCP(server_label="label", server_url="url"),
+        ]
+        context = ToolContext(tools)
+        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="mymodel", output=[], status="")
+        context.recover_tools_from_previous_response(previous_response)
+
+        assert len(context.tools_to_process) == 2
+        assert len(context.previous_tools) == 0
+        assert len(context.previous_tool_listings) == 0
+
+    def test_reusable_server(self):
+        # An MCP server listed in the previous response is reused; only file_search remains to process.
+        tools = [
+            OpenAIResponseInputToolFileSearch(vector_store_ids=["fake"]),
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ]
+        context = ToolContext(tools)
+        output = [
+            OpenAIResponseOutputMessageMCPListTools(
+                id="test", server_label="alabel", tools=[MCPListToolsTool(name="test_tool", input_schema={})]
+            )
+        ]
+        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
+        previous_response.tools = [
+            OpenAIResponseInputToolFileSearch(vector_store_ids=["fake"]),
+            OpenAIResponseToolMCP(server_label="alabel"),  # matched by label; URL comes from the current tool list
+        ]
+        context.recover_tools_from_previous_response(previous_response)
+
+        assert len(context.tools_to_process) == 1
+        assert context.tools_to_process[0].type == "file_search"
+        assert len(context.previous_tools) == 1
+        assert context.previous_tools["test_tool"].server_label == "alabel"
+        assert context.previous_tools["test_tool"].server_url == "aurl"
+        assert len(context.previous_tool_listings) == 1
+        assert len(context.previous_tool_listings[0].tools) == 1
+        assert context.previous_tool_listings[0].server_label == "alabel"
+
+    def test_multiple_reusable_servers(self):
+        # Both MCP servers have prior listings, so only function and web_search need processing.
+        tools = [
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseInputToolMCP(server_label="anotherlabel", server_url="anotherurl"),
+            OpenAIResponseInputToolWebSearch(),
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ]
+        context = ToolContext(tools)
+        output = [
+            OpenAIResponseOutputMessageMCPListTools(
+                id="test1", server_label="alabel", tools=[MCPListToolsTool(name="test_tool", input_schema={})]
+            ),
+            OpenAIResponseOutputMessageMCPListTools(
+                id="test2",
+                server_label="anotherlabel",
+                tools=[MCPListToolsTool(name="some_other_tool", input_schema={})],
+            ),
+        ]
+        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
+        previous_response.tools = [
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseToolMCP(server_label="anotherlabel", server_url="anotherurl"),
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+            OpenAIResponseToolMCP(server_label="alabel", server_url="aurl"),
+        ]
+        context.recover_tools_from_previous_response(previous_response)
+
+        assert len(context.tools_to_process) == 2
+        assert context.tools_to_process[0].type == "function"
+        assert context.tools_to_process[1].type == "web_search"
+        assert len(context.previous_tools) == 2
+        assert context.previous_tools["test_tool"].server_label == "alabel"
+        assert context.previous_tools["test_tool"].server_url == "aurl"
+        assert context.previous_tools["some_other_tool"].server_label == "anotherlabel"
+        assert context.previous_tools["some_other_tool"].server_url == "anotherurl"
+        assert len(context.previous_tool_listings) == 2
+        assert len(context.previous_tool_listings[0].tools) == 1
+        assert context.previous_tool_listings[0].server_label == "alabel"
+        assert len(context.previous_tool_listings[1].tools) == 1
+        assert context.previous_tool_listings[1].server_label == "anotherlabel"
+
+    def test_multiple_servers_only_one_reusable(self):
+        # "alabel" has no prior listing, so its MCP tool must still be processed.
+        tools = [
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseInputToolMCP(server_label="anotherlabel", server_url="anotherurl"),
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl"),
+        ]
+        context = ToolContext(tools)
+        output = [
+            OpenAIResponseOutputMessageMCPListTools(
+                id="test2",
+                server_label="anotherlabel",
+                tools=[MCPListToolsTool(name="some_other_tool", input_schema={})],
+            )
+        ]
+        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
+        previous_response.tools = [
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseToolMCP(server_label="anotherlabel", server_url="anotherurl"),
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+        ]
+        context.recover_tools_from_previous_response(previous_response)
+
+        assert len(context.tools_to_process) == 3
+        assert context.tools_to_process[0].type == "function"
+        assert context.tools_to_process[1].type == "web_search"
+        assert context.tools_to_process[2].type == "mcp"
+        assert len(context.previous_tools) == 1
+        assert context.previous_tools["some_other_tool"].server_label == "anotherlabel"
+        assert context.previous_tools["some_other_tool"].server_url == "anotherurl"
+        assert len(context.previous_tool_listings) == 1
+        assert context.previous_tool_listings[0].server_label == "anotherlabel"  # only listing kept
+        assert len(context.previous_tool_listings[0].tools) == 1
+
+    def test_mismatched_allowed_tools(self):
+        # allowed_tools changed since the previous run, so the cached listing for "alabel" is unusable.
+        tools = [
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseInputToolMCP(server_label="anotherlabel", server_url="anotherurl"),
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+            OpenAIResponseInputToolMCP(server_label="alabel", server_url="aurl", allowed_tools=["test_tool_2"]),
+        ]
+        context = ToolContext(tools)
+        output = [
+            OpenAIResponseOutputMessageMCPListTools(
+                id="test1", server_label="alabel", tools=[MCPListToolsTool(name="test_tool_1", input_schema={})]
+            ),
+            OpenAIResponseOutputMessageMCPListTools(
+                id="test2",
+                server_label="anotherlabel",
+                tools=[MCPListToolsTool(name="some_other_tool", input_schema={})],
+            ),
+        ]
+        previous_response = OpenAIResponseObject(created_at=1234, id="test", model="fake", output=output, status="")
+        previous_response.tools = [
+            OpenAIResponseInputToolFunction(name="fake", parameters=None),
+            OpenAIResponseToolMCP(server_label="anotherlabel", server_url="anotherurl"),
+            OpenAIResponseInputToolWebSearch(type="web_search"),
+            OpenAIResponseToolMCP(server_label="alabel", server_url="aurl"),
+        ]
+        context.recover_tools_from_previous_response(previous_response)
+
+        assert len(context.tools_to_process) == 3
+        assert context.tools_to_process[0].type == "function"
+        assert context.tools_to_process[1].type == "web_search"
+        assert context.tools_to_process[2].type == "mcp"
+        assert len(context.previous_tools) == 1
+        assert context.previous_tools["some_other_tool"].server_label == "anotherlabel"
+        assert context.previous_tools["some_other_tool"].server_url == "anotherurl"
+        assert len(context.previous_tool_listings) == 1
+        assert len(context.previous_tool_listings[0].tools) == 1
+        assert context.previous_tool_listings[0].server_label == "anotherlabel"
diff --git a/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py b/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py
new file mode 100644
index 000000000..a914bbef4
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_responses_safety_utils.py
@@ -0,0 +1,157 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from unittest.mock import AsyncMock
+
+import pytest
+
+from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+)
+from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
+ extract_guardrail_ids,
+ run_guardrails,
+)
+from llama_stack_api.agents import ResponseGuardrailSpec
+from llama_stack_api.safety import ModerationObject, ModerationObjectResults
+
+
+@pytest.fixture
+def mock_apis():
+ """Create mock APIs for testing."""
+ return {
+ "inference_api": AsyncMock(),
+ "tool_groups_api": AsyncMock(),
+ "tool_runtime_api": AsyncMock(),
+ "responses_store": AsyncMock(),
+ "vector_io_api": AsyncMock(),
+ "conversations_api": AsyncMock(),
+ "safety_api": AsyncMock(),
+ "prompts_api": AsyncMock(),
+ "files_api": AsyncMock(),
+ }
+
+
+@pytest.fixture
+def responses_impl(mock_apis):
+ """Create OpenAIResponsesImpl instance with mocked dependencies."""
+ return OpenAIResponsesImpl(**mock_apis)
+
+
+def test_extract_guardrail_ids_from_strings(responses_impl):
+ """Test extraction from simple string guardrail IDs."""
+ guardrails = ["llama-guard", "content-filter", "nsfw-detector"]
+ result = extract_guardrail_ids(guardrails)
+ assert result == ["llama-guard", "content-filter", "nsfw-detector"]
+
+
+def test_extract_guardrail_ids_from_objects(responses_impl):
+ """Test extraction from ResponseGuardrailSpec objects."""
+ guardrails = [
+ ResponseGuardrailSpec(type="llama-guard"),
+ ResponseGuardrailSpec(type="content-filter"),
+ ]
+ result = extract_guardrail_ids(guardrails)
+ assert result == ["llama-guard", "content-filter"]
+
+
+def test_extract_guardrail_ids_mixed_formats(responses_impl):
+ """Test extraction from mixed string and object formats."""
+ guardrails = [
+ "llama-guard",
+ ResponseGuardrailSpec(type="content-filter"),
+ "nsfw-detector",
+ ]
+ result = extract_guardrail_ids(guardrails)
+ assert result == ["llama-guard", "content-filter", "nsfw-detector"]
+
+
+def test_extract_guardrail_ids_none_input(responses_impl):
+ """Test extraction with None input."""
+ result = extract_guardrail_ids(None)
+ assert result == []
+
+
+def test_extract_guardrail_ids_empty_list(responses_impl):
+ """Test extraction with empty list."""
+ result = extract_guardrail_ids([])
+ assert result == []
+
+
+def test_extract_guardrail_ids_unknown_format(responses_impl):
+ """Test extraction with unknown guardrail format raises ValueError."""
+ # Create an object that's neither string nor ResponseGuardrailSpec
+ unknown_object = {"invalid": "format"} # Plain dict, not ResponseGuardrailSpec
+ guardrails = ["valid-guardrail", unknown_object, "another-guardrail"]
+ with pytest.raises(ValueError, match="Unknown guardrail format.*expected str or ResponseGuardrailSpec"):
+ extract_guardrail_ids(guardrails)
+
+
+@pytest.fixture
+def mock_safety_api():
+ """Create mock safety API for guardrails testing."""
+ safety_api = AsyncMock()
+ # Mock the routing table and shields list for guardrails lookup
+ safety_api.routing_table = AsyncMock()
+ shield = AsyncMock()
+ shield.identifier = "llama-guard"
+ shield.provider_resource_id = "llama-guard-model"
+ safety_api.routing_table.list_shields.return_value = AsyncMock(data=[shield])
+ return safety_api
+
+
+async def test_run_guardrails_no_violation(mock_safety_api):
+ """Test guardrails validation with no violations."""
+ text = "Hello world"
+ guardrail_ids = ["llama-guard"]
+
+ # Mock moderation to return non-flagged content
+ unflagged_result = ModerationObjectResults(flagged=False, categories={"violence": False})
+ mock_moderation_object = ModerationObject(id="test-mod-id", model="llama-guard-model", results=[unflagged_result])
+ mock_safety_api.run_moderation.return_value = mock_moderation_object
+
+ result = await run_guardrails(mock_safety_api, text, guardrail_ids)
+
+ assert result is None
+ # Verify run_moderation was called with the correct model
+ mock_safety_api.run_moderation.assert_called_once()
+ call_args = mock_safety_api.run_moderation.call_args
+ assert call_args[1]["model"] == "llama-guard-model"
+
+
+async def test_run_guardrails_with_violation(mock_safety_api):
+ """Test guardrails validation with safety violation."""
+ text = "Harmful content"
+ guardrail_ids = ["llama-guard"]
+
+ # Mock moderation to return flagged content
+ flagged_result = ModerationObjectResults(
+ flagged=True,
+ categories={"violence": True},
+ user_message="Content flagged by moderation",
+ metadata={"violation_type": ["S1"]},
+ )
+ mock_moderation_object = ModerationObject(id="test-mod-id", model="llama-guard-model", results=[flagged_result])
+ mock_safety_api.run_moderation.return_value = mock_moderation_object
+
+ result = await run_guardrails(mock_safety_api, text, guardrail_ids)
+
+ assert result == "Content flagged by moderation (flagged for: violence) (violation type: S1)"
+
+
+async def test_run_guardrails_empty_inputs(mock_safety_api):
+ """Test guardrails validation with empty inputs."""
+ # Test empty guardrail_ids
+ result = await run_guardrails(mock_safety_api, "test", [])
+ assert result is None
+
+ # Test empty text
+ result = await run_guardrails(mock_safety_api, "", ["llama-guard"])
+ assert result is None
+
+ # Test both empty
+ result = await run_guardrails(mock_safety_api, "", [])
+ assert result is None
diff --git a/tests/unit/providers/agents/meta_reference/test_safety_optional.py b/tests/unit/providers/agents/meta_reference/test_safety_optional.py
new file mode 100644
index 000000000..c2311b68f
--- /dev/null
+++ b/tests/unit/providers/agents/meta_reference/test_safety_optional.py
@@ -0,0 +1,214 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""Tests for making Safety API optional in meta-reference agents provider.
+
+This test suite validates the changes introduced to fix issue #4165, which
+allows running the meta-reference agents provider without the Safety API.
+Safety API is now an optional dependency, and errors are raised at request time
+when guardrails are explicitly requested without Safety API configured.
+"""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from llama_stack.core.datatypes import Api
+from llama_stack.core.storage.datatypes import KVStoreReference, ResponsesStoreReference
+from llama_stack.providers.inline.agents.meta_reference import get_provider_impl
+from llama_stack.providers.inline.agents.meta_reference.config import (
+ AgentPersistenceConfig,
+ MetaReferenceAgentsImplConfig,
+)
+from llama_stack.providers.inline.agents.meta_reference.responses.utils import (
+ run_guardrails,
+)
+
+
+@pytest.fixture
+def mock_persistence_config():
+ """Create a mock persistence configuration."""
+ return AgentPersistenceConfig(
+ agent_state=KVStoreReference(
+ backend="kv_default",
+ namespace="agents",
+ ),
+ responses=ResponsesStoreReference(
+ backend="sql_default",
+ table_name="responses",
+ ),
+ )
+
+
+@pytest.fixture
+def mock_deps():
+ """Create mock dependencies for the agents provider."""
+ # Create mock APIs
+ inference_api = AsyncMock()
+ vector_io_api = AsyncMock()
+ tool_runtime_api = AsyncMock()
+ tool_groups_api = AsyncMock()
+ conversations_api = AsyncMock()
+ prompts_api = AsyncMock()
+ files_api = AsyncMock()
+
+ return {
+ Api.inference: inference_api,
+ Api.vector_io: vector_io_api,
+ Api.tool_runtime: tool_runtime_api,
+ Api.tool_groups: tool_groups_api,
+ Api.conversations: conversations_api,
+ Api.prompts: prompts_api,
+ Api.files: files_api,
+ }
+
+
+class TestProviderInitialization:
+ """Test provider initialization with different safety API configurations."""
+
+ async def test_initialization_with_safety_api_present(self, mock_persistence_config, mock_deps):
+ """Test successful initialization when Safety API is configured."""
+ config = MetaReferenceAgentsImplConfig(persistence=mock_persistence_config)
+
+ # Add safety API to deps
+ safety_api = AsyncMock()
+ mock_deps[Api.safety] = safety_api
+
+ # Mock the initialize method to avoid actual initialization
+ with patch(
+ "llama_stack.providers.inline.agents.meta_reference.agents.MetaReferenceAgentsImpl.initialize",
+ new_callable=AsyncMock,
+ ):
+ # Should not raise any exception
+ provider = await get_provider_impl(config, mock_deps, policy=[], telemetry_enabled=False)
+ assert provider is not None
+
+ async def test_initialization_without_safety_api(self, mock_persistence_config, mock_deps):
+ """Test successful initialization when Safety API is not configured."""
+ config = MetaReferenceAgentsImplConfig(persistence=mock_persistence_config)
+
+ # Safety API is NOT in mock_deps - provider should still start
+ # Mock the initialize method to avoid actual initialization
+ with patch(
+ "llama_stack.providers.inline.agents.meta_reference.agents.MetaReferenceAgentsImpl.initialize",
+ new_callable=AsyncMock,
+ ):
+ # Should not raise any exception
+ provider = await get_provider_impl(config, mock_deps, policy=[], telemetry_enabled=False)
+ assert provider is not None
+ assert provider.safety_api is None
+
+
+class TestGuardrailsFunctionality:
+ """Test run_guardrails function with optional safety API."""
+
+ async def test_run_guardrails_with_none_safety_api(self):
+ """Test that run_guardrails returns None when safety_api is None."""
+ result = await run_guardrails(safety_api=None, messages="test message", guardrail_ids=["llama-guard"])
+ assert result is None
+
+ async def test_run_guardrails_with_empty_messages(self):
+ """Test that run_guardrails returns None for empty messages."""
+ # Test with None safety API
+ result = await run_guardrails(safety_api=None, messages="", guardrail_ids=["llama-guard"])
+ assert result is None
+
+ # Test with mock safety API
+ mock_safety_api = AsyncMock()
+ result = await run_guardrails(safety_api=mock_safety_api, messages="", guardrail_ids=["llama-guard"])
+ assert result is None
+
+ async def test_run_guardrails_with_none_safety_api_ignores_guardrails(self):
+ """Test that guardrails are skipped when safety_api is None, even if guardrail_ids are provided."""
+ # Should not raise exception, just return None
+ result = await run_guardrails(
+ safety_api=None,
+ messages="potentially harmful content",
+ guardrail_ids=["llama-guard", "content-filter"],
+ )
+ assert result is None
+
+ async def test_create_response_rejects_guardrails_without_safety_api(self, mock_persistence_config, mock_deps):
+ """Test that create_openai_response raises error when guardrails requested but Safety API unavailable."""
+ from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+ )
+ from llama_stack_api import ResponseGuardrailSpec
+
+ # Create OpenAIResponsesImpl with no safety API
+ with patch("llama_stack.providers.inline.agents.meta_reference.responses.openai_responses.ResponsesStore"):
+ impl = OpenAIResponsesImpl(
+ inference_api=mock_deps[Api.inference],
+ tool_groups_api=mock_deps[Api.tool_groups],
+ tool_runtime_api=mock_deps[Api.tool_runtime],
+ responses_store=MagicMock(),
+ vector_io_api=mock_deps[Api.vector_io],
+ safety_api=None, # No Safety API
+ conversations_api=mock_deps[Api.conversations],
+ prompts_api=mock_deps[Api.prompts],
+ files_api=mock_deps[Api.files],
+ )
+
+ # Test with string guardrail
+ with pytest.raises(ValueError) as exc_info:
+ await impl.create_openai_response(
+ input="test input",
+ model="test-model",
+ guardrails=["llama-guard"],
+ )
+ assert "Cannot process guardrails: Safety API is not configured" in str(exc_info.value)
+
+ # Test with ResponseGuardrailSpec
+ with pytest.raises(ValueError) as exc_info:
+ await impl.create_openai_response(
+ input="test input",
+ model="test-model",
+ guardrails=[ResponseGuardrailSpec(type="llama-guard")],
+ )
+ assert "Cannot process guardrails: Safety API is not configured" in str(exc_info.value)
+
+ async def test_create_response_succeeds_without_guardrails_and_no_safety_api(
+ self, mock_persistence_config, mock_deps
+ ):
+ """Test that create_openai_response works when no guardrails requested and Safety API unavailable."""
+ from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
+ OpenAIResponsesImpl,
+ )
+
+ # Create OpenAIResponsesImpl with no safety API
+ with (
+ patch("llama_stack.providers.inline.agents.meta_reference.responses.openai_responses.ResponsesStore"),
+ patch.object(OpenAIResponsesImpl, "_create_streaming_response", new_callable=AsyncMock) as mock_stream,
+ ):
+ # Mock the streaming response to return a simple async generator
+ async def mock_generator():
+ yield MagicMock()
+
+ mock_stream.return_value = mock_generator()
+
+ impl = OpenAIResponsesImpl(
+ inference_api=mock_deps[Api.inference],
+ tool_groups_api=mock_deps[Api.tool_groups],
+ tool_runtime_api=mock_deps[Api.tool_runtime],
+ responses_store=MagicMock(),
+ vector_io_api=mock_deps[Api.vector_io],
+ safety_api=None, # No Safety API
+ conversations_api=mock_deps[Api.conversations],
+ prompts_api=mock_deps[Api.prompts],
+ files_api=mock_deps[Api.files],
+ )
+
+ # Should not raise when no guardrails requested
+ # Note: This will still fail later in execution due to mocking, but should pass the validation
+ try:
+ await impl.create_openai_response(
+ input="test input",
+ model="test-model",
+ guardrails=None, # No guardrails
+ )
+ except Exception as e:
+ # Ensure the error is NOT about missing Safety API
+ assert "Cannot process guardrails: Safety API is not configured" not in str(e)
diff --git a/tests/unit/providers/batches/conftest.py b/tests/unit/providers/batches/conftest.py
index d161bf976..8ecfa99fb 100644
--- a/tests/unit/providers/batches/conftest.py
+++ b/tests/unit/providers/batches/conftest.py
@@ -13,9 +13,9 @@ from unittest.mock import AsyncMock
import pytest
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.providers.inline.batches.reference.batches import ReferenceBatchesImpl
from llama_stack.providers.inline.batches.reference.config import ReferenceBatchesImplConfig
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
@pytest.fixture
diff --git a/tests/unit/providers/batches/test_reference.py b/tests/unit/providers/batches/test_reference.py
index 89cb1af9d..32d59234d 100644
--- a/tests/unit/providers/batches/test_reference.py
+++ b/tests/unit/providers/batches/test_reference.py
@@ -59,8 +59,7 @@ from unittest.mock import AsyncMock, MagicMock
import pytest
-from llama_stack.apis.batches import BatchObject
-from llama_stack.apis.common.errors import ConflictError, ResourceNotFoundError
+from llama_stack_api import BatchObject, ConflictError, ResourceNotFoundError
class TestReferenceBatchesImpl:
diff --git a/tests/unit/providers/batches/test_reference_idempotency.py b/tests/unit/providers/batches/test_reference_idempotency.py
index e6cb29b9b..acb7ca01c 100644
--- a/tests/unit/providers/batches/test_reference_idempotency.py
+++ b/tests/unit/providers/batches/test_reference_idempotency.py
@@ -44,7 +44,7 @@ import asyncio
import pytest
-from llama_stack.apis.common.errors import ConflictError
+from llama_stack_api import ConflictError
class TestReferenceBatchesIdempotency:
diff --git a/tests/unit/providers/files/conftest.py b/tests/unit/providers/files/conftest.py
index c64ecc3a3..f8959b5b7 100644
--- a/tests/unit/providers/files/conftest.py
+++ b/tests/unit/providers/files/conftest.py
@@ -9,8 +9,8 @@ import pytest
from moto import mock_aws
from llama_stack.core.storage.datatypes import SqliteSqlStoreConfig, SqlStoreReference
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.remote.files.s3 import S3FilesImplConfig, get_adapter_impl
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
class MockUploadFile:
diff --git a/tests/unit/providers/files/test_s3_files.py b/tests/unit/providers/files/test_s3_files.py
index 92a45a9f2..de6c92e9c 100644
--- a/tests/unit/providers/files/test_s3_files.py
+++ b/tests/unit/providers/files/test_s3_files.py
@@ -9,8 +9,7 @@ from unittest.mock import patch
import pytest
from botocore.exceptions import ClientError
-from llama_stack.apis.common.errors import ResourceNotFoundError
-from llama_stack.apis.files import OpenAIFilePurpose
+from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError
class TestS3FilesImpl:
@@ -228,7 +227,7 @@ class TestS3FilesImpl:
mock_now.return_value = 0
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
sample_text_file.filename = "test_expired_file"
uploaded = await s3_provider.openai_upload_file(
@@ -260,7 +259,7 @@ class TestS3FilesImpl:
async def test_unsupported_expires_after_anchor(self, s3_provider, sample_text_file):
"""Unsupported anchor value should raise ValueError."""
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
sample_text_file.filename = "test_unsupported_expires_after_anchor"
@@ -273,7 +272,7 @@ class TestS3FilesImpl:
async def test_nonint_expires_after_seconds(self, s3_provider, sample_text_file):
"""Non-integer seconds in expires_after should raise ValueError."""
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
sample_text_file.filename = "test_nonint_expires_after_seconds"
@@ -286,7 +285,7 @@ class TestS3FilesImpl:
async def test_expires_after_seconds_out_of_bounds(self, s3_provider, sample_text_file):
"""Seconds outside allowed range should raise ValueError."""
- from llama_stack.apis.files import ExpiresAfter
+ from llama_stack_api import ExpiresAfter
with pytest.raises(ValueError, match="greater than or equal to 3600"):
await s3_provider.openai_upload_file(
diff --git a/tests/unit/providers/files/test_s3_files_auth.py b/tests/unit/providers/files/test_s3_files_auth.py
index 6097f2808..49b33fd7b 100644
--- a/tests/unit/providers/files/test_s3_files_auth.py
+++ b/tests/unit/providers/files/test_s3_files_auth.py
@@ -8,10 +8,9 @@ from unittest.mock import patch
import pytest
-from llama_stack.apis.common.errors import ResourceNotFoundError
-from llama_stack.apis.files import OpenAIFilePurpose
from llama_stack.core.datatypes import User
from llama_stack.providers.remote.files.s3.files import S3FilesImpl
+from llama_stack_api import OpenAIFilePurpose, ResourceNotFoundError
async def test_listing_hides_other_users_file(s3_provider, sample_text_file):
@@ -19,11 +18,11 @@ async def test_listing_hides_other_users_file(s3_provider, sample_text_file):
user_a = User("user-a", {"roles": ["team-a"]})
user_b = User("user-b", {"roles": ["team-b"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
listed = await s3_provider.openai_list_files()
assert all(f.id != uploaded.id for f in listed.data)
@@ -42,11 +41,11 @@ async def test_cannot_access_other_user_file(s3_provider, sample_text_file, op):
user_a = User("user-a", {"roles": ["team-a"]})
user_b = User("user-b", {"roles": ["team-b"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
with pytest.raises(ResourceNotFoundError):
await op(s3_provider, uploaded.id)
@@ -57,11 +56,11 @@ async def test_shared_role_allows_listing(s3_provider, sample_text_file):
user_a = User("user-a", {"roles": ["shared-role"]})
user_b = User("user-b", {"roles": ["shared-role"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_a
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_b
listed = await s3_provider.openai_list_files()
assert any(f.id == uploaded.id for f in listed.data)
@@ -80,10 +79,10 @@ async def test_shared_role_allows_access(s3_provider, sample_text_file, op):
user_x = User("user-x", {"roles": ["shared-role"]})
user_y = User("user-y", {"roles": ["shared-role"]})
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_x
uploaded = await s3_provider.openai_upload_file(file=sample_text_file, purpose=OpenAIFilePurpose.ASSISTANTS)
- with patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
+ with patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user") as mock_get_user:
mock_get_user.return_value = user_y
await op(s3_provider, uploaded.id)
diff --git a/tests/unit/providers/inference/test_bedrock_adapter.py b/tests/unit/providers/inference/test_bedrock_adapter.py
index fdd07c032..2a1ca769b 100644
--- a/tests/unit/providers/inference/test_bedrock_adapter.py
+++ b/tests/unit/providers/inference/test_bedrock_adapter.py
@@ -10,9 +10,9 @@ from unittest.mock import AsyncMock, MagicMock
import pytest
from openai import AuthenticationError
-from llama_stack.apis.inference import OpenAIChatCompletionRequestWithExtraBody
from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter
from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
+from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody
def test_adapter_initialization():
@@ -40,8 +40,8 @@ def test_api_key_from_header_overrides_config():
"""Test API key from request header overrides config via client property"""
config = BedrockConfig(api_key="config-key", region_name="us-east-1")
adapter = BedrockInferenceAdapter(config=config)
- adapter.provider_data_api_key_field = "aws_bedrock_api_key"
- adapter.get_request_provider_data = MagicMock(return_value=SimpleNamespace(aws_bedrock_api_key="header-key"))
+ adapter.provider_data_api_key_field = "aws_bearer_token_bedrock"
+ adapter.get_request_provider_data = MagicMock(return_value=SimpleNamespace(aws_bearer_token_bedrock="header-key"))
# The client property is where header override happens (in OpenAIMixin)
assert adapter.client.api_key == "header-key"
diff --git a/tests/unit/providers/inference/test_bedrock_config.py b/tests/unit/providers/inference/test_bedrock_config.py
index 4c1fd56a2..622080426 100644
--- a/tests/unit/providers/inference/test_bedrock_config.py
+++ b/tests/unit/providers/inference/test_bedrock_config.py
@@ -9,7 +9,7 @@ from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
def test_bedrock_config_defaults_no_env(monkeypatch):
"""Test BedrockConfig defaults when env vars are not set"""
- monkeypatch.delenv("AWS_BEDROCK_API_KEY", raising=False)
+ monkeypatch.delenv("AWS_BEARER_TOKEN_BEDROCK", raising=False)
monkeypatch.delenv("AWS_DEFAULT_REGION", raising=False)
config = BedrockConfig()
assert config.auth_credential is None
@@ -35,5 +35,5 @@ def test_bedrock_config_sample():
sample = BedrockConfig.sample_run_config()
assert "api_key" in sample
assert "region_name" in sample
- assert sample["api_key"] == "${env.AWS_BEDROCK_API_KEY:=}"
+ assert sample["api_key"] == "${env.AWS_BEARER_TOKEN_BEDROCK:=}"
assert sample["region_name"] == "${env.AWS_DEFAULT_REGION:=us-east-2}"
diff --git a/tests/unit/providers/inference/test_inference_client_caching.py b/tests/unit/providers/inference/test_inference_client_caching.py
index aa3a2c77a..6ddf790af 100644
--- a/tests/unit/providers/inference/test_inference_client_caching.py
+++ b/tests/unit/providers/inference/test_inference_client_caching.py
@@ -120,7 +120,7 @@ from llama_stack.providers.remote.inference.watsonx.watsonx import WatsonXInfere
VLLMInferenceAdapter,
"llama_stack.providers.remote.inference.vllm.VLLMProviderDataValidator",
{
- "url": "http://fake",
+ "base_url": "http://fake",
},
),
],
@@ -153,7 +153,7 @@ def test_litellm_provider_data_used(config_cls, adapter_cls, provider_data_valid
"""Validate data for LiteLLM-based providers. Similar to test_openai_provider_data_used, but without the
assumption that there is an OpenAI-compatible client object."""
- inference_adapter = adapter_cls(config=config_cls())
+ inference_adapter = adapter_cls(config=config_cls(base_url="http://fake"))
inference_adapter.__provider_spec__ = MagicMock()
inference_adapter.__provider_spec__.provider_data_validator = provider_data_validator
diff --git a/tests/unit/providers/inference/test_remote_vllm.py b/tests/unit/providers/inference/test_remote_vllm.py
index ffd45798e..0cf8ed306 100644
--- a/tests/unit/providers/inference/test_remote_vllm.py
+++ b/tests/unit/providers/inference/test_remote_vllm.py
@@ -10,7 +10,13 @@ from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch
import pytest
-from llama_stack.apis.inference import (
+from llama_stack.core.routers.inference import InferenceRouter
+from llama_stack.core.routing_tables.models import ModelsRoutingTable
+from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig
+from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter
+from llama_stack_api import (
+ HealthStatus,
+ Model,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionRequestWithExtraBody,
@@ -20,12 +26,6 @@ from llama_stack.apis.inference import (
OpenAICompletionRequestWithExtraBody,
ToolChoice,
)
-from llama_stack.apis.models import Model
-from llama_stack.core.routers.inference import InferenceRouter
-from llama_stack.core.routing_tables.models import ModelsRoutingTable
-from llama_stack.providers.datatypes import HealthStatus
-from llama_stack.providers.remote.inference.vllm.config import VLLMInferenceAdapterConfig
-from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapter
# These are unit test for the remote vllm provider
# implementation. This should only contain tests which are specific to
@@ -40,7 +40,7 @@ from llama_stack.providers.remote.inference.vllm.vllm import VLLMInferenceAdapte
@pytest.fixture(scope="function")
async def vllm_inference_adapter():
- config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345")
+ config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345")
inference_adapter = VLLMInferenceAdapter(config=config)
inference_adapter.model_store = AsyncMock()
await inference_adapter.initialize()
@@ -204,7 +204,7 @@ async def test_vllm_completion_extra_body():
via extra_body to the underlying OpenAI client through the InferenceRouter.
"""
# Set up the vLLM adapter
- config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345")
+ config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345")
vllm_adapter = VLLMInferenceAdapter(config=config)
vllm_adapter.__provider_id__ = "vllm"
await vllm_adapter.initialize()
@@ -277,7 +277,7 @@ async def test_vllm_chat_completion_extra_body():
via extra_body to the underlying OpenAI client through the InferenceRouter for chat completion.
"""
# Set up the vLLM adapter
- config = VLLMInferenceAdapterConfig(url="http://mocked.localhost:12345")
+ config = VLLMInferenceAdapterConfig(base_url="http://mocked.localhost:12345")
vllm_adapter = VLLMInferenceAdapter(config=config)
vllm_adapter.__provider_id__ = "vllm"
await vllm_adapter.initialize()
diff --git a/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py b/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py
index fff29928c..658132340 100644
--- a/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py
+++ b/tests/unit/providers/inline/agents/meta_reference/responses/test_streaming.py
@@ -8,11 +8,11 @@ from unittest.mock import AsyncMock
import pytest
-from llama_stack.apis.tools import ToolDef
from llama_stack.providers.inline.agents.meta_reference.responses.streaming import (
convert_tooldef_to_chat_tool,
)
from llama_stack.providers.inline.agents.meta_reference.responses.types import ChatCompletionContext
+from llama_stack_api import ToolDef
@pytest.fixture
diff --git a/src/llama_stack/apis/__init__.py b/tests/unit/providers/inline/inference/__init__.py
similarity index 100%
rename from src/llama_stack/apis/__init__.py
rename to tests/unit/providers/inline/inference/__init__.py
diff --git a/tests/unit/providers/inline/inference/test_meta_reference.py b/tests/unit/providers/inline/inference/test_meta_reference.py
new file mode 100644
index 000000000..381836397
--- /dev/null
+++ b/tests/unit/providers/inline/inference/test_meta_reference.py
@@ -0,0 +1,44 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from unittest.mock import Mock
+
+import pytest
+
+from llama_stack.providers.inline.inference.meta_reference.model_parallel import (
+ ModelRunner,
+)
+
+
+class TestModelRunner:
+ """Test ModelRunner task dispatching for model-parallel inference."""
+
+ def test_chat_completion_task_dispatch(self):
+ """Verify ModelRunner correctly dispatches chat_completion tasks."""
+ # Create a mock generator
+ mock_generator = Mock()
+ mock_generator.chat_completion = Mock(return_value=iter([]))
+
+ runner = ModelRunner(mock_generator)
+
+ # Create a chat_completion task
+ fake_params = {"model": "test"}
+ fake_messages = [{"role": "user", "content": "test"}]
+ task = ("chat_completion", [fake_params, fake_messages])
+
+ # Execute task
+ runner(task)
+
+ # Verify chat_completion was called with correct arguments
+ mock_generator.chat_completion.assert_called_once_with(fake_params, fake_messages)
+
+ def test_invalid_task_type_raises_error(self):
+ """Verify ModelRunner rejects invalid task types."""
+ mock_generator = Mock()
+ runner = ModelRunner(mock_generator)
+
+ with pytest.raises(ValueError, match="Unexpected task type"):
+ runner(("invalid_task", []))
diff --git a/tests/unit/providers/nvidia/test_datastore.py b/tests/unit/providers/nvidia/test_datastore.py
index b59636f7b..36006cc39 100644
--- a/tests/unit/providers/nvidia/test_datastore.py
+++ b/tests/unit/providers/nvidia/test_datastore.py
@@ -9,10 +9,9 @@ from unittest.mock import patch
import pytest
-from llama_stack.apis.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack.apis.resource import ResourceType
from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig
from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter
+from llama_stack_api import Dataset, DatasetPurpose, ResourceType, URIDataSource
@pytest.fixture
diff --git a/tests/unit/providers/nvidia/test_eval.py b/tests/unit/providers/nvidia/test_eval.py
index 86e005b76..783d664bf 100644
--- a/tests/unit/providers/nvidia/test_eval.py
+++ b/tests/unit/providers/nvidia/test_eval.py
@@ -9,14 +9,20 @@ from unittest.mock import MagicMock, patch
import pytest
-from llama_stack.apis.benchmarks import Benchmark
-from llama_stack.apis.common.job_types import Job, JobStatus
-from llama_stack.apis.eval.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
-from llama_stack.apis.inference.inference import TopPSamplingStrategy
-from llama_stack.apis.resource import ResourceType
from llama_stack.models.llama.sku_types import CoreModelId
from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig
from llama_stack.providers.remote.eval.nvidia.eval import NVIDIAEvalImpl
+from llama_stack_api import (
+ Benchmark,
+ BenchmarkConfig,
+ EvaluateResponse,
+ Job,
+ JobStatus,
+ ModelCandidate,
+ ResourceType,
+ SamplingParams,
+ TopPSamplingStrategy,
+)
MOCK_DATASET_ID = "default/test-dataset"
MOCK_BENCHMARK_ID = "test-benchmark"
diff --git a/tests/unit/providers/nvidia/test_parameters.py b/tests/unit/providers/nvidia/test_parameters.py
index ad381da26..b714fc607 100644
--- a/tests/unit/providers/nvidia/test_parameters.py
+++ b/tests/unit/providers/nvidia/test_parameters.py
@@ -10,7 +10,12 @@ from unittest.mock import patch
import pytest
-from llama_stack.apis.post_training.post_training import (
+from llama_stack.core.library_client import convert_pydantic_to_json_value
+from llama_stack.providers.remote.post_training.nvidia.post_training import (
+ NvidiaPostTrainingAdapter,
+ NvidiaPostTrainingConfig,
+)
+from llama_stack_api import (
DataConfig,
DatasetFormat,
EfficiencyConfig,
@@ -19,11 +24,6 @@ from llama_stack.apis.post_training.post_training import (
OptimizerType,
TrainingConfig,
)
-from llama_stack.core.library_client import convert_pydantic_to_json_value
-from llama_stack.providers.remote.post_training.nvidia.post_training import (
- NvidiaPostTrainingAdapter,
- NvidiaPostTrainingConfig,
-)
class TestNvidiaParameters:
diff --git a/tests/unit/providers/nvidia/test_rerank_inference.py b/tests/unit/providers/nvidia/test_rerank_inference.py
index 2793b5f44..4ad9dc766 100644
--- a/tests/unit/providers/nvidia/test_rerank_inference.py
+++ b/tests/unit/providers/nvidia/test_rerank_inference.py
@@ -9,10 +9,10 @@ from unittest.mock import AsyncMock, MagicMock, patch
import aiohttp
import pytest
-from llama_stack.apis.models import ModelType
from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig
from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import ModelType
class MockResponse:
@@ -146,7 +146,7 @@ async def test_hosted_model_not_in_endpoint_mapping():
async def test_self_hosted_ignores_endpoint():
adapter = create_adapter(
- config=NVIDIAConfig(url="http://localhost:8000", api_key=None),
+ config=NVIDIAConfig(base_url="http://localhost:8000", api_key=None),
rerank_endpoints={"test-model": "https://model.endpoint/rerank"}, # This should be ignored for self-hosted.
)
mock_session = MockSession(MockResponse())
diff --git a/tests/unit/providers/nvidia/test_safety.py b/tests/unit/providers/nvidia/test_safety.py
index 922d7f61f..07e04ddea 100644
--- a/tests/unit/providers/nvidia/test_safety.py
+++ b/tests/unit/providers/nvidia/test_safety.py
@@ -10,13 +10,16 @@ from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from llama_stack.apis.inference import CompletionMessage, UserMessage
-from llama_stack.apis.resource import ResourceType
-from llama_stack.apis.safety import RunShieldResponse, ViolationLevel
-from llama_stack.apis.shields import Shield
-from llama_stack.models.llama.datatypes import StopReason
from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter
+from llama_stack_api import (
+ OpenAIAssistantMessageParam,
+ OpenAIUserMessageParam,
+ ResourceType,
+ RunShieldResponse,
+ Shield,
+ ViolationLevel,
+)
class FakeNVIDIASafetyAdapter(NVIDIASafetyAdapter):
@@ -136,11 +139,9 @@ async def test_run_shield_allowed(nvidia_adapter, mock_guardrails_post):
# Run the shield
messages = [
- UserMessage(role="user", content="Hello, how are you?"),
- CompletionMessage(
- role="assistant",
+ OpenAIUserMessageParam(content="Hello, how are you?"),
+ OpenAIAssistantMessageParam(
content="I'm doing well, thank you for asking!",
- stop_reason=StopReason.end_of_message,
tool_calls=[],
),
]
@@ -191,13 +192,10 @@ async def test_run_shield_blocked(nvidia_adapter, mock_guardrails_post):
# Mock Guardrails API response
mock_guardrails_post.return_value = {"status": "blocked", "rails_status": {"reason": "harmful_content"}}
- # Run the shield
messages = [
- UserMessage(role="user", content="Hello, how are you?"),
- CompletionMessage(
- role="assistant",
+ OpenAIUserMessageParam(content="Hello, how are you?"),
+ OpenAIAssistantMessageParam(
content="I'm doing well, thank you for asking!",
- stop_reason=StopReason.end_of_message,
tool_calls=[],
),
]
@@ -243,7 +241,7 @@ async def test_run_shield_not_found(nvidia_adapter, mock_guardrails_post):
adapter.shield_store.get_shield.return_value = None
messages = [
- UserMessage(role="user", content="Hello, how are you?"),
+ OpenAIUserMessageParam(content="Hello, how are you?"),
]
with pytest.raises(ValueError):
@@ -274,11 +272,9 @@ async def test_run_shield_http_error(nvidia_adapter, mock_guardrails_post):
# Running the shield should raise an exception
messages = [
- UserMessage(role="user", content="Hello, how are you?"),
- CompletionMessage(
- role="assistant",
+ OpenAIUserMessageParam(content="Hello, how are you?"),
+ OpenAIAssistantMessageParam(
content="I'm doing well, thank you for asking!",
- stop_reason=StopReason.end_of_message,
tool_calls=[],
),
]
diff --git a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
index 91148605d..94948da41 100644
--- a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
+++ b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py
@@ -10,15 +10,6 @@ from unittest.mock import patch
import pytest
-from llama_stack.apis.post_training.post_training import (
- DataConfig,
- DatasetFormat,
- LoraFinetuningConfig,
- OptimizerConfig,
- OptimizerType,
- QATFinetuningConfig,
- TrainingConfig,
-)
from llama_stack.core.library_client import convert_pydantic_to_json_value
from llama_stack.providers.remote.post_training.nvidia.post_training import (
ListNvidiaPostTrainingJobs,
@@ -27,6 +18,15 @@ from llama_stack.providers.remote.post_training.nvidia.post_training import (
NvidiaPostTrainingJob,
NvidiaPostTrainingJobStatusResponse,
)
+from llama_stack_api import (
+ DataConfig,
+ DatasetFormat,
+ LoraFinetuningConfig,
+ OptimizerConfig,
+ OptimizerType,
+ QATFinetuningConfig,
+ TrainingConfig,
+)
@pytest.fixture
diff --git a/tests/unit/providers/test_bedrock.py b/tests/unit/providers/test_bedrock.py
index 684fcf262..7126e1b69 100644
--- a/tests/unit/providers/test_bedrock.py
+++ b/tests/unit/providers/test_bedrock.py
@@ -7,9 +7,9 @@
from types import SimpleNamespace
from unittest.mock import AsyncMock, PropertyMock, patch
-from llama_stack.apis.inference import OpenAIChatCompletionRequestWithExtraBody
from llama_stack.providers.remote.inference.bedrock.bedrock import BedrockInferenceAdapter
from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
+from llama_stack_api import OpenAIChatCompletionRequestWithExtraBody
def test_can_create_adapter():
diff --git a/tests/unit/providers/test_configs.py b/tests/unit/providers/test_configs.py
index 867cfffbc..b4ba78394 100644
--- a/tests/unit/providers/test_configs.py
+++ b/tests/unit/providers/test_configs.py
@@ -4,8 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
+from typing import get_args, get_origin
+
import pytest
-from pydantic import BaseModel
+from pydantic import BaseModel, HttpUrl
from llama_stack.core.distribution import get_provider_registry, providable_apis
from llama_stack.core.utils.dynamic import instantiate_class_type
@@ -41,3 +43,55 @@ class TestProviderConfigurations:
sample_config = config_type.sample_run_config(__distro_dir__="foobarbaz")
assert isinstance(sample_config, dict), f"{config_class_name}.sample_run_config() did not return a dict"
+
+ def test_remote_inference_url_standardization(self):
+ """Verify all remote inference providers use standardized base_url configuration."""
+ provider_registry = get_provider_registry()
+ inference_providers = provider_registry.get("inference", {})
+
+ # Filter for remote providers only
+ remote_providers = {k: v for k, v in inference_providers.items() if k.startswith("remote::")}
+
+ failures = []
+ for provider_type, provider_spec in remote_providers.items():
+ try:
+ config_class_name = provider_spec.config_class
+ config_type = instantiate_class_type(config_class_name)
+
+ # Check that config has base_url field (not url)
+ if hasattr(config_type, "model_fields"):
+ fields = config_type.model_fields
+
+ # Should NOT have 'url' field (old pattern)
+ if "url" in fields:
+ failures.append(
+ f"{provider_type}: Uses deprecated 'url' field instead of 'base_url'. "
+ f"Please rename to 'base_url' for consistency."
+ )
+
+ # Should have 'base_url' field with HttpUrl | None type
+ if "base_url" in fields:
+ field_info = fields["base_url"]
+ annotation = field_info.annotation
+
+ # Check if it's HttpUrl or HttpUrl | None
+ # get_origin() returns Union for (X | Y), None for plain types
+ # get_args() returns the types inside Union, e.g. (HttpUrl, NoneType)
+ is_valid = False
+ if get_origin(annotation) is not None: # It's a Union/Optional
+ if HttpUrl in get_args(annotation):
+ is_valid = True
+ elif annotation == HttpUrl: # Plain HttpUrl without | None
+ is_valid = True
+
+ if not is_valid:
+ failures.append(
+ f"{provider_type}: base_url field has incorrect type annotation. "
+ f"Expected 'HttpUrl | None', got '{annotation}'"
+ )
+
+ except Exception as e:
+ failures.append(f"{provider_type}: Error checking URL standardization: {str(e)}")
+
+ if failures:
+ pytest.fail("URL standardization violations found:\n" + "\n".join(f" - {f}" for f in failures))
diff --git a/tests/unit/providers/utils/inference/test_openai_compat.py b/tests/unit/providers/utils/inference/test_openai_compat.py
deleted file mode 100644
index c200c4395..000000000
--- a/tests/unit/providers/utils/inference/test_openai_compat.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import pytest
-from pydantic import ValidationError
-
-from llama_stack.apis.common.content_types import TextContentItem
-from llama_stack.apis.inference import (
- CompletionMessage,
- OpenAIAssistantMessageParam,
- OpenAIChatCompletionContentPartImageParam,
- OpenAIChatCompletionContentPartTextParam,
- OpenAIDeveloperMessageParam,
- OpenAIImageURL,
- OpenAISystemMessageParam,
- OpenAIToolMessageParam,
- OpenAIUserMessageParam,
- SystemMessage,
- UserMessage,
-)
-from llama_stack.models.llama.datatypes import BuiltinTool, StopReason, ToolCall
-from llama_stack.providers.utils.inference.openai_compat import (
- convert_message_to_openai_dict,
- convert_message_to_openai_dict_new,
- openai_messages_to_messages,
-)
-
-
-async def test_convert_message_to_openai_dict():
- message = UserMessage(content=[TextContentItem(text="Hello, world!")], role="user")
- assert await convert_message_to_openai_dict(message) == {
- "role": "user",
- "content": [{"type": "text", "text": "Hello, world!"}],
- }
-
-
-# Test convert_message_to_openai_dict with a tool call
-async def test_convert_message_to_openai_dict_with_tool_call():
- message = CompletionMessage(
- content="",
- tool_calls=[ToolCall(call_id="123", tool_name="test_tool", arguments='{"foo": "bar"}')],
- stop_reason=StopReason.end_of_turn,
- )
-
- openai_dict = await convert_message_to_openai_dict(message)
-
- assert openai_dict == {
- "role": "assistant",
- "content": [{"type": "text", "text": ""}],
- "tool_calls": [
- {"id": "123", "type": "function", "function": {"name": "test_tool", "arguments": '{"foo": "bar"}'}}
- ],
- }
-
-
-async def test_convert_message_to_openai_dict_with_builtin_tool_call():
- message = CompletionMessage(
- content="",
- tool_calls=[
- ToolCall(
- call_id="123",
- tool_name=BuiltinTool.brave_search,
- arguments='{"foo": "bar"}',
- )
- ],
- stop_reason=StopReason.end_of_turn,
- )
-
- openai_dict = await convert_message_to_openai_dict(message)
-
- assert openai_dict == {
- "role": "assistant",
- "content": [{"type": "text", "text": ""}],
- "tool_calls": [
- {"id": "123", "type": "function", "function": {"name": "brave_search", "arguments": '{"foo": "bar"}'}}
- ],
- }
-
-
-async def test_openai_messages_to_messages_with_content_str():
- openai_messages = [
- OpenAISystemMessageParam(content="system message"),
- OpenAIUserMessageParam(content="user message"),
- OpenAIAssistantMessageParam(content="assistant message"),
- ]
-
- llama_messages = openai_messages_to_messages(openai_messages)
- assert len(llama_messages) == 3
- assert isinstance(llama_messages[0], SystemMessage)
- assert isinstance(llama_messages[1], UserMessage)
- assert isinstance(llama_messages[2], CompletionMessage)
- assert llama_messages[0].content == "system message"
- assert llama_messages[1].content == "user message"
- assert llama_messages[2].content == "assistant message"
-
-
-async def test_openai_messages_to_messages_with_content_list():
- openai_messages = [
- OpenAISystemMessageParam(content=[OpenAIChatCompletionContentPartTextParam(text="system message")]),
- OpenAIUserMessageParam(content=[OpenAIChatCompletionContentPartTextParam(text="user message")]),
- OpenAIAssistantMessageParam(content=[OpenAIChatCompletionContentPartTextParam(text="assistant message")]),
- ]
-
- llama_messages = openai_messages_to_messages(openai_messages)
- assert len(llama_messages) == 3
- assert isinstance(llama_messages[0], SystemMessage)
- assert isinstance(llama_messages[1], UserMessage)
- assert isinstance(llama_messages[2], CompletionMessage)
- assert llama_messages[0].content[0].text == "system message"
- assert llama_messages[1].content[0].text == "user message"
- assert llama_messages[2].content[0].text == "assistant message"
-
-
-@pytest.mark.parametrize(
- "message_class,kwargs",
- [
- (OpenAISystemMessageParam, {}),
- (OpenAIAssistantMessageParam, {}),
- (OpenAIDeveloperMessageParam, {}),
- (OpenAIUserMessageParam, {}),
- (OpenAIToolMessageParam, {"tool_call_id": "call_123"}),
- ],
-)
-def test_message_accepts_text_string(message_class, kwargs):
- """Test that messages accept string text content."""
- msg = message_class(content="Test message", **kwargs)
- assert msg.content == "Test message"
-
-
-@pytest.mark.parametrize(
- "message_class,kwargs",
- [
- (OpenAISystemMessageParam, {}),
- (OpenAIAssistantMessageParam, {}),
- (OpenAIDeveloperMessageParam, {}),
- (OpenAIUserMessageParam, {}),
- (OpenAIToolMessageParam, {"tool_call_id": "call_123"}),
- ],
-)
-def test_message_accepts_text_list(message_class, kwargs):
- """Test that messages accept list of text content parts."""
- content_list = [OpenAIChatCompletionContentPartTextParam(text="Test message")]
- msg = message_class(content=content_list, **kwargs)
- assert len(msg.content) == 1
- assert msg.content[0].text == "Test message"
-
-
-@pytest.mark.parametrize(
- "message_class,kwargs",
- [
- (OpenAISystemMessageParam, {}),
- (OpenAIAssistantMessageParam, {}),
- (OpenAIDeveloperMessageParam, {}),
- (OpenAIToolMessageParam, {"tool_call_id": "call_123"}),
- ],
-)
-def test_message_rejects_images(message_class, kwargs):
- """Test that system, assistant, developer, and tool messages reject image content."""
- with pytest.raises(ValidationError):
- message_class(
- content=[
- OpenAIChatCompletionContentPartImageParam(image_url=OpenAIImageURL(url="http://example.com/image.jpg"))
- ],
- **kwargs,
- )
-
-
-def test_user_message_accepts_images():
- """Test that user messages accept image content (unlike other message types)."""
- # List with images should work
- msg = OpenAIUserMessageParam(
- content=[
- OpenAIChatCompletionContentPartTextParam(text="Describe this image:"),
- OpenAIChatCompletionContentPartImageParam(image_url=OpenAIImageURL(url="http://example.com/image.jpg")),
- ]
- )
- assert len(msg.content) == 2
- assert msg.content[0].text == "Describe this image:"
- assert msg.content[1].image_url.url == "http://example.com/image.jpg"
-
-
-async def test_convert_message_to_openai_dict_new_user_message():
- """Test convert_message_to_openai_dict_new with UserMessage."""
- message = UserMessage(content="Hello, world!", role="user")
- result = await convert_message_to_openai_dict_new(message)
-
- assert result["role"] == "user"
- assert result["content"] == "Hello, world!"
-
-
-async def test_convert_message_to_openai_dict_new_completion_message_with_tool_calls():
- """Test convert_message_to_openai_dict_new with CompletionMessage containing tool calls."""
- message = CompletionMessage(
- content="I'll help you find the weather.",
- tool_calls=[
- ToolCall(
- call_id="call_123",
- tool_name="get_weather",
- arguments='{"city": "Sligo"}',
- )
- ],
- stop_reason=StopReason.end_of_turn,
- )
- result = await convert_message_to_openai_dict_new(message)
-
- # This would have failed with "Cannot instantiate typing.Union" before the fix
- assert result["role"] == "assistant"
- assert result["content"] == "I'll help you find the weather."
- assert "tool_calls" in result
- assert result["tool_calls"] is not None
- assert len(result["tool_calls"]) == 1
-
- tool_call = result["tool_calls"][0]
- assert tool_call.id == "call_123"
- assert tool_call.type == "function"
- assert tool_call.function.name == "get_weather"
- assert tool_call.function.arguments == '{"city": "Sligo"}'
diff --git a/tests/unit/providers/utils/inference/test_openai_mixin.py b/tests/unit/providers/utils/inference/test_openai_mixin.py
index 0b5ea078b..02d44f2ba 100644
--- a/tests/unit/providers/utils/inference/test_openai_mixin.py
+++ b/tests/unit/providers/utils/inference/test_openai_mixin.py
@@ -12,11 +12,17 @@ from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch
import pytest
from pydantic import BaseModel, Field
-from llama_stack.apis.inference import Model, OpenAIChatCompletionRequestWithExtraBody, OpenAIUserMessageParam
-from llama_stack.apis.models import ModelType
from llama_stack.core.request_headers import request_provider_data_context
from llama_stack.providers.utils.inference.model_registry import RemoteInferenceProviderConfig
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
+from llama_stack_api import (
+ Model,
+ ModelType,
+ OpenAIChatCompletionRequestWithExtraBody,
+ OpenAICompletionRequestWithExtraBody,
+ OpenAIEmbeddingsRequestWithExtraBody,
+ OpenAIUserMessageParam,
+)
class OpenAIMixinImpl(OpenAIMixin):
@@ -835,3 +841,96 @@ class TestOpenAIMixinProviderDataApiKey:
error_message = str(exc_info.value)
assert "test_api_key" in error_message
assert "x-llamastack-provider-data" in error_message
+
+
+class TestOpenAIMixinAllowedModelsInference:
+ """Test cases for allowed_models enforcement during inference requests"""
+
+ async def test_inference_with_allowed_models(self, mixin, mock_client_context):
+ """Test that all inference methods succeed with allowed models"""
+ mixin.config.allowed_models = ["gpt-4", "text-davinci-003", "text-embedding-ada-002"]
+
+ mock_client = MagicMock()
+ mock_client.chat.completions.create = AsyncMock(return_value=MagicMock())
+ mock_client.completions.create = AsyncMock(return_value=MagicMock())
+ mock_embedding_response = MagicMock()
+ mock_embedding_response.data = [MagicMock(embedding=[0.1, 0.2, 0.3])]
+ mock_embedding_response.usage = MagicMock(prompt_tokens=5, total_tokens=5)
+ mock_client.embeddings.create = AsyncMock(return_value=mock_embedding_response)
+
+ with mock_client_context(mixin, mock_client):
+ # Test chat completion
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="gpt-4", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
+ mock_client.chat.completions.create.assert_called_once()
+
+ # Test completion
+ await mixin.openai_completion(
+ OpenAICompletionRequestWithExtraBody(model="text-davinci-003", prompt="Hello")
+ )
+ mock_client.completions.create.assert_called_once()
+
+ # Test embeddings
+ await mixin.openai_embeddings(
+ OpenAIEmbeddingsRequestWithExtraBody(model="text-embedding-ada-002", input="test text")
+ )
+ mock_client.embeddings.create.assert_called_once()
+
+ async def test_inference_with_disallowed_models(self, mixin, mock_client_context):
+ """Test that all inference methods fail with disallowed models"""
+ mixin.config.allowed_models = ["gpt-4"]
+
+ mock_client = MagicMock()
+
+ with mock_client_context(mixin, mock_client):
+ # Test chat completion with disallowed model
+ with pytest.raises(ValueError, match="Model 'gpt-4-turbo' is not in the allowed models list"):
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="gpt-4-turbo", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
+
+ # Test completion with disallowed model
+ with pytest.raises(ValueError, match="Model 'text-davinci-002' is not in the allowed models list"):
+ await mixin.openai_completion(
+ OpenAICompletionRequestWithExtraBody(model="text-davinci-002", prompt="Hello")
+ )
+
+ # Test embeddings with disallowed model
+ with pytest.raises(ValueError, match="Model 'text-embedding-3-large' is not in the allowed models list"):
+ await mixin.openai_embeddings(
+ OpenAIEmbeddingsRequestWithExtraBody(model="text-embedding-3-large", input="test text")
+ )
+
+ mock_client.chat.completions.create.assert_not_called()
+ mock_client.completions.create.assert_not_called()
+ mock_client.embeddings.create.assert_not_called()
+
+ async def test_inference_with_no_restrictions(self, mixin, mock_client_context):
+ """Test that inference succeeds when allowed_models is None or empty list blocks all"""
+ # Test with None (no restrictions)
+ assert mixin.config.allowed_models is None
+ mock_client = MagicMock()
+ mock_client.chat.completions.create = AsyncMock(return_value=MagicMock())
+
+ with mock_client_context(mixin, mock_client):
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="any-model", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
+ mock_client.chat.completions.create.assert_called_once()
+
+ # Test with empty list (blocks all models)
+ mixin.config.allowed_models = []
+ with mock_client_context(mixin, mock_client):
+ with pytest.raises(ValueError, match="Model 'gpt-4' is not in the allowed models list"):
+ await mixin.openai_chat_completion(
+ OpenAIChatCompletionRequestWithExtraBody(
+ model="gpt-4", messages=[OpenAIUserMessageParam(role="user", content="Hello")]
+ )
+ )
diff --git a/tests/unit/providers/utils/inference/test_prompt_adapter.py b/tests/unit/providers/utils/inference/test_prompt_adapter.py
new file mode 100644
index 000000000..ab5736ac5
--- /dev/null
+++ b/tests/unit/providers/utils/inference/test_prompt_adapter.py
@@ -0,0 +1,32 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack.models.llama.datatypes import RawTextItem
+from llama_stack.providers.utils.inference.prompt_adapter import (
+ convert_openai_message_to_raw_message,
+)
+from llama_stack_api import OpenAIAssistantMessageParam, OpenAIUserMessageParam
+
+
+class TestConvertOpenAIMessageToRawMessage:
+ """Test conversion of OpenAI message types to RawMessage format."""
+
+ async def test_user_message_conversion(self):
+ msg = OpenAIUserMessageParam(role="user", content="Hello world")
+ raw_msg = await convert_openai_message_to_raw_message(msg)
+
+ assert raw_msg.role == "user"
+ assert isinstance(raw_msg.content, RawTextItem)
+ assert raw_msg.content.text == "Hello world"
+
+ async def test_assistant_message_conversion(self):
+ msg = OpenAIAssistantMessageParam(role="assistant", content="Hi there!")
+ raw_msg = await convert_openai_message_to_raw_message(msg)
+
+ assert raw_msg.role == "assistant"
+ assert isinstance(raw_msg.content, RawTextItem)
+ assert raw_msg.content.text == "Hi there!"
+ assert raw_msg.tool_calls == []
diff --git a/tests/unit/providers/utils/memory/test_vector_store.py b/tests/unit/providers/utils/memory/test_vector_store.py
index 590bdd1d2..f3241ba20 100644
--- a/tests/unit/providers/utils/memory/test_vector_store.py
+++ b/tests/unit/providers/utils/memory/test_vector_store.py
@@ -8,9 +8,8 @@ from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from llama_stack.apis.common.content_types import URL, TextContentItem
-from llama_stack.apis.tools import RAGDocument
from llama_stack.providers.utils.memory.vector_store import content_from_data_and_mime_type, content_from_doc
+from llama_stack_api import URL, RAGDocument, TextContentItem
async def test_content_from_doc_with_url():
diff --git a/tests/unit/providers/utils/test_model_registry.py b/tests/unit/providers/utils/test_model_registry.py
index 04e75aa82..1e3efafa1 100644
--- a/tests/unit/providers/utils/test_model_registry.py
+++ b/tests/unit/providers/utils/test_model_registry.py
@@ -35,8 +35,8 @@
import pytest
-from llama_stack.apis.models import Model
from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper, ProviderModelEntry
+from llama_stack_api import Model
@pytest.fixture
diff --git a/tests/unit/providers/vector_io/conftest.py b/tests/unit/providers/vector_io/conftest.py
index 5e56ea417..b4ea77c0a 100644
--- a/tests/unit/providers/vector_io/conftest.py
+++ b/tests/unit/providers/vector_io/conftest.py
@@ -10,16 +10,15 @@ from unittest.mock import AsyncMock, MagicMock, patch
import numpy as np
import pytest
-from llama_stack.apis.vector_io import Chunk, ChunkMetadata, QueryChunksResponse
-from llama_stack.apis.vector_stores import VectorStore
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import register_kvstore_backends
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.inline.vector_io.faiss.faiss import FaissIndex, FaissVectorIOAdapter
from llama_stack.providers.inline.vector_io.sqlite_vec import SQLiteVectorIOConfig
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import SQLiteVecIndex, SQLiteVecVectorIOAdapter
from llama_stack.providers.remote.vector_io.pgvector.config import PGVectorVectorIOConfig
from llama_stack.providers.remote.vector_io.pgvector.pgvector import PGVectorIndex, PGVectorVectorIOAdapter
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, VectorStore
EMBEDDING_DIMENSION = 768
COLLECTION_PREFIX = "test_collection"
@@ -280,7 +279,7 @@ async def pgvector_vec_adapter(unique_kvstore_config, mock_inference_api, embedd
) as mock_check_version:
mock_check_version.return_value = "0.5.1"
- with patch("llama_stack.providers.utils.kvstore.kvstore_impl") as mock_kvstore_impl:
+ with patch("llama_stack.core.storage.kvstore.kvstore_impl") as mock_kvstore_impl:
mock_kvstore = AsyncMock()
mock_kvstore_impl.return_value = mock_kvstore
diff --git a/tests/unit/providers/vector_io/test_faiss.py b/tests/unit/providers/vector_io/test_faiss.py
index 44bcd0cfd..075296cbb 100644
--- a/tests/unit/providers/vector_io/test_faiss.py
+++ b/tests/unit/providers/vector_io/test_faiss.py
@@ -10,15 +10,12 @@ from unittest.mock import MagicMock, patch
import numpy as np
import pytest
-from llama_stack.apis.files import Files
-from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
-from llama_stack.apis.vector_stores import VectorStore
-from llama_stack.providers.datatypes import HealthStatus
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.inline.vector_io.faiss.faiss import (
FaissIndex,
FaissVectorIOAdapter,
)
+from llama_stack_api import Chunk, Files, HealthStatus, QueryChunksResponse, VectorStore
# This test is a unit test for the FaissVectorIOAdapter class. This should only contain
# tests which are specific to this class. More general (API-level) tests should be placed in
diff --git a/tests/unit/providers/vector_io/test_sqlite_vec.py b/tests/unit/providers/vector_io/test_sqlite_vec.py
index 5ee62cd63..d1548cf37 100644
--- a/tests/unit/providers/vector_io/test_sqlite_vec.py
+++ b/tests/unit/providers/vector_io/test_sqlite_vec.py
@@ -9,12 +9,12 @@ import asyncio
import numpy as np
import pytest
-from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import (
SQLiteVecIndex,
SQLiteVecVectorIOAdapter,
_create_sqlite_connection,
)
+from llama_stack_api import Chunk, QueryChunksResponse
# This test is a unit test for the SQLiteVecVectorIOAdapter class. This should only contain
# tests which are specific to this class. More general (API-level) tests should be placed in
diff --git a/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py b/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py
index 121623e1b..3797abb2c 100644
--- a/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py
+++ b/tests/unit/providers/vector_io/test_vector_io_openai_vector_stores.py
@@ -11,17 +11,17 @@ from unittest.mock import AsyncMock, patch
import numpy as np
import pytest
-from llama_stack.apis.common.errors import VectorStoreNotFoundError
-from llama_stack.apis.vector_io import (
+from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
+from llama_stack_api import (
Chunk,
OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
OpenAICreateVectorStoreRequestWithExtraBody,
QueryChunksResponse,
+ VectorStore,
VectorStoreChunkingStrategyAuto,
VectorStoreFileObject,
+ VectorStoreNotFoundError,
)
-from llama_stack.apis.vector_stores import VectorStore
-from llama_stack.providers.inline.vector_io.sqlite_vec.sqlite_vec import VECTOR_DBS_PREFIX
# This test is a unit test for the inline VectorIO providers. This should only contain
# tests which are specific to this class. More general (API-level) tests should be placed in
@@ -222,7 +222,7 @@ async def test_insert_chunks_missing_db_raises(vector_io_adapter):
async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
"""Ensure no KeyError when document_id is missing or in different places."""
- from llama_stack.apis.vector_io import Chunk, ChunkMetadata
+ from llama_stack_api import Chunk, ChunkMetadata
fake_index = AsyncMock()
vector_io_adapter.cache["db1"] = fake_index
@@ -255,10 +255,9 @@ async def test_insert_chunks_with_missing_document_id(vector_io_adapter):
async def test_document_id_with_invalid_type_raises_error():
"""Ensure TypeError is raised when document_id is not a string."""
- from llama_stack.apis.vector_io import Chunk
-
# Integer document_id should raise TypeError
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+ from llama_stack_api import Chunk
chunk = Chunk(content="test", chunk_id=generate_chunk_id("test", "test"), metadata={"document_id": 12345})
with pytest.raises(TypeError) as exc_info:
diff --git a/tests/unit/providers/vector_io/test_vector_utils.py b/tests/unit/providers/vector_io/test_vector_utils.py
index 1ca753a44..3e6b2971f 100644
--- a/tests/unit/providers/vector_io/test_vector_utils.py
+++ b/tests/unit/providers/vector_io/test_vector_utils.py
@@ -4,8 +4,8 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
-from llama_stack.apis.vector_io import Chunk, ChunkMetadata
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+from llama_stack_api import Chunk, ChunkMetadata, VectorStoreFileObject
# This test is a unit test for the chunk_utils.py helpers. This should only contain
# tests which are specific to this file. More general (API-level) tests should be placed in
@@ -78,3 +78,77 @@ def test_chunk_serialization():
serialized_chunk = chunk.model_dump()
assert serialized_chunk["chunk_id"] == "test-chunk-id"
assert "chunk_id" in serialized_chunk
+
+
+def test_vector_store_file_object_attributes_validation():
+ """Test VectorStoreFileObject validates and sanitizes attributes at input boundary."""
+ # Test with metadata containing lists, nested dicts, and primitives
+ from llama_stack_api.vector_io import VectorStoreChunkingStrategyAuto
+
+ file_obj = VectorStoreFileObject(
+ id="file-123",
+ attributes={
+ "tags": ["transformers", "h100-compatible", "region:us"], # List -> string
+ "model_name": "granite-3.3-8b", # String preserved
+ "score": 0.95, # Float preserved
+ "active": True, # Bool preserved
+ "count": 42, # Int -> float
+ "nested": {"key": "value"}, # Dict filtered out
+ },
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+
+ # Lists converted to comma-separated strings
+ assert file_obj.attributes["tags"] == "transformers, h100-compatible, region:us"
+ # Primitives preserved
+ assert file_obj.attributes["model_name"] == "granite-3.3-8b"
+ assert file_obj.attributes["score"] == 0.95
+ assert file_obj.attributes["active"] is True
+ assert file_obj.attributes["count"] == 42.0 # int -> float
+ # Complex types filtered out
+ assert "nested" not in file_obj.attributes
+
+
+def test_vector_store_file_object_attributes_constraints():
+ """Test VectorStoreFileObject enforces OpenAPI constraints on attributes."""
+ from llama_stack_api.vector_io import VectorStoreChunkingStrategyAuto
+
+ # Test max 16 properties
+ many_attrs = {f"key{i}": f"value{i}" for i in range(20)}
+ file_obj = VectorStoreFileObject(
+ id="file-123",
+ attributes=many_attrs,
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+ assert len(file_obj.attributes) == 16 # Max 16 properties
+
+ # Test max 64 char keys are filtered
+ long_key_attrs = {"a" * 65: "value", "valid_key": "value"}
+ file_obj = VectorStoreFileObject(
+ id="file-124",
+ attributes=long_key_attrs,
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+ assert "a" * 65 not in file_obj.attributes
+ assert "valid_key" in file_obj.attributes
+
+ # Test max 512 char string values are truncated
+ long_value_attrs = {"key": "x" * 600}
+ file_obj = VectorStoreFileObject(
+ id="file-125",
+ attributes=long_value_attrs,
+ chunking_strategy=VectorStoreChunkingStrategyAuto(),
+ created_at=1234567890,
+ status="completed",
+ vector_store_id="vs-123",
+ )
+ assert len(file_obj.attributes["key"]) == 512
diff --git a/tests/unit/rag/test_rag_query.py b/tests/unit/rag/test_rag_query.py
index 8563d0d53..7eb17b74b 100644
--- a/tests/unit/rag/test_rag_query.py
+++ b/tests/unit/rag/test_rag_query.py
@@ -8,13 +8,8 @@ from unittest.mock import AsyncMock, MagicMock
import pytest
-from llama_stack.apis.tools.rag_tool import RAGQueryConfig
-from llama_stack.apis.vector_io import (
- Chunk,
- ChunkMetadata,
- QueryChunksResponse,
-)
from llama_stack.providers.inline.tool_runtime.rag.memory import MemoryToolRuntimeImpl
+from llama_stack_api import Chunk, ChunkMetadata, QueryChunksResponse, RAGQueryConfig
class TestRagQuery:
diff --git a/tests/unit/rag/test_vector_store.py b/tests/unit/rag/test_vector_store.py
index 1f73fdb8e..2562df8d6 100644
--- a/tests/unit/rag/test_vector_store.py
+++ b/tests/unit/rag/test_vector_store.py
@@ -13,12 +13,6 @@ from unittest.mock import AsyncMock, MagicMock
import numpy as np
import pytest
-from llama_stack.apis.inference.inference import (
- OpenAIEmbeddingData,
- OpenAIEmbeddingsRequestWithExtraBody,
-)
-from llama_stack.apis.tools import RAGDocument
-from llama_stack.apis.vector_io import Chunk
from llama_stack.providers.utils.memory.vector_store import (
URL,
VectorStoreWithIndex,
@@ -27,6 +21,7 @@ from llama_stack.providers.utils.memory.vector_store import (
make_overlapped_chunks,
)
from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
+from llama_stack_api import Chunk, OpenAIEmbeddingData, OpenAIEmbeddingsRequestWithExtraBody, RAGDocument
DUMMY_PDF_PATH = Path(os.path.abspath(__file__)).parent / "fixtures" / "dummy.pdf"
# Depending on the machine, this can get parsed a couple of ways
diff --git a/tests/unit/registry/test_registry.py b/tests/unit/registry/test_registry.py
index d4c9786d1..2b32de833 100644
--- a/tests/unit/registry/test_registry.py
+++ b/tests/unit/registry/test_registry.py
@@ -7,16 +7,15 @@
import pytest
-from llama_stack.apis.inference import Model
-from llama_stack.apis.vector_stores import VectorStore
from llama_stack.core.datatypes import VectorStoreWithOwner
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore import kvstore_impl, register_kvstore_backends
from llama_stack.core.store.registry import (
KEY_FORMAT,
CachedDiskDistributionRegistry,
DiskDistributionRegistry,
)
-from llama_stack.providers.utils.kvstore import kvstore_impl, register_kvstore_backends
+from llama_stack_api import Model, VectorStore
@pytest.fixture
@@ -304,8 +303,8 @@ async def test_double_registration_different_objects(disk_dist_registry):
async def test_double_registration_with_cache(cached_disk_dist_registry):
"""Test double registration behavior with caching enabled."""
- from llama_stack.apis.models import ModelType
from llama_stack.core.datatypes import ModelWithOwner
+ from llama_stack_api import ModelType
model1 = ModelWithOwner(
identifier="test_model",
diff --git a/tests/unit/registry/test_registry_acl.py b/tests/unit/registry/test_registry_acl.py
index 09b9a3cfb..a09d2a30d 100644
--- a/tests/unit/registry/test_registry_acl.py
+++ b/tests/unit/registry/test_registry_acl.py
@@ -5,9 +5,9 @@
# the root directory of this source tree.
-from llama_stack.apis.models import ModelType
from llama_stack.core.datatypes import ModelWithOwner, User
from llama_stack.core.store.registry import CachedDiskDistributionRegistry
+from llama_stack_api import ModelType
async def test_registry_cache_with_acl(cached_disk_dist_registry):
diff --git a/tests/unit/server/test_access_control.py b/tests/unit/server/test_access_control.py
index ea4f9b8b2..23a9636d5 100644
--- a/tests/unit/server/test_access_control.py
+++ b/tests/unit/server/test_access_control.py
@@ -10,11 +10,10 @@ import pytest
import yaml
from pydantic import TypeAdapter, ValidationError
-from llama_stack.apis.datatypes import Api
-from llama_stack.apis.models import ModelType
from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
from llama_stack.core.datatypes import AccessRule, ModelWithOwner, User
from llama_stack.core.routing_tables.models import ModelsRoutingTable
+from llama_stack_api import Api, ModelType
class AsyncMock(MagicMock):
diff --git a/tests/unit/server/test_auth.py b/tests/unit/server/test_auth.py
index cc9397f07..57a552514 100644
--- a/tests/unit/server/test_auth.py
+++ b/tests/unit/server/test_auth.py
@@ -144,7 +144,7 @@ def middleware_with_mocks(mock_auth_endpoint):
middleware = AuthenticationMiddleware(mock_app, auth_config, {})
# Mock the route_impls to simulate finding routes with required scopes
- from llama_stack.schema_utils import WebMethod
+ from llama_stack_api import WebMethod
routes = {
("POST", "/test/scoped"): WebMethod(route="/test/scoped", method="POST", required_scope="test.read"),
diff --git a/tests/unit/server/test_quota.py b/tests/unit/server/test_quota.py
index 0939414dd..cd8c38eed 100644
--- a/tests/unit/server/test_quota.py
+++ b/tests/unit/server/test_quota.py
@@ -15,7 +15,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
from llama_stack.core.datatypes import QuotaConfig, QuotaPeriod
from llama_stack.core.server.quota import QuotaMiddleware
from llama_stack.core.storage.datatypes import KVStoreReference, SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
@pytest.fixture
diff --git a/tests/unit/server/test_resolver.py b/tests/unit/server/test_resolver.py
index b44f12f7e..a1b03f630 100644
--- a/tests/unit/server/test_resolver.py
+++ b/tests/unit/server/test_resolver.py
@@ -11,7 +11,6 @@ from unittest.mock import AsyncMock, MagicMock
from pydantic import BaseModel, Field
-from llama_stack.apis.inference import Inference
from llama_stack.core.datatypes import Api, Provider, StackRunConfig
from llama_stack.core.resolver import resolve_impls
from llama_stack.core.routers.inference import InferenceRouter
@@ -25,9 +24,9 @@ from llama_stack.core.storage.datatypes import (
SqlStoreReference,
StorageConfig,
)
-from llama_stack.providers.datatypes import InlineProviderSpec, ProviderSpec
-from llama_stack.providers.utils.kvstore import register_kvstore_backends
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.core.storage.kvstore import register_kvstore_backends
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import Inference, InlineProviderSpec, ProviderSpec
def add_protocol_methods(cls: type, protocol: type[Protocol]) -> None:
diff --git a/tests/unit/server/test_schema_registry.py b/tests/unit/server/test_schema_registry.py
new file mode 100644
index 000000000..548b43a29
--- /dev/null
+++ b/tests/unit/server/test_schema_registry.py
@@ -0,0 +1,48 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from pydantic import BaseModel
+
+from llama_stack_api import Conversation, SamplingStrategy
+from llama_stack_api.schema_utils import (
+ clear_dynamic_schema_types,
+ get_registered_schema_info,
+ iter_dynamic_schema_types,
+ iter_json_schema_types,
+ iter_registered_schema_types,
+ register_dynamic_schema_type,
+)
+
+
+def test_json_schema_registry_contains_known_model() -> None:
+ assert Conversation in iter_json_schema_types()
+
+
+def test_registered_schema_registry_contains_sampling_strategy() -> None:
+ registered_names = {info.name for info in iter_registered_schema_types()}
+ assert "SamplingStrategy" in registered_names
+
+ schema_info = get_registered_schema_info(SamplingStrategy)
+ assert schema_info is not None
+ assert schema_info.name == "SamplingStrategy"
+
+
+def test_dynamic_schema_registration_round_trip() -> None:
+ existing_models = tuple(iter_dynamic_schema_types())
+ clear_dynamic_schema_types()
+ try:
+
+ class TemporaryModel(BaseModel):
+ foo: str
+
+ register_dynamic_schema_type(TemporaryModel)
+ assert TemporaryModel in iter_dynamic_schema_types()
+
+ clear_dynamic_schema_types()
+ assert TemporaryModel not in iter_dynamic_schema_types()
+ finally:
+ for model in existing_models:
+ register_dynamic_schema_type(model)
diff --git a/tests/unit/server/test_server.py b/tests/unit/server/test_server.py
index d6d4f4f23..53f193672 100644
--- a/tests/unit/server/test_server.py
+++ b/tests/unit/server/test_server.py
@@ -12,7 +12,7 @@ from pydantic import ValidationError
from llama_stack.core.access_control.access_control import AccessDeniedError
from llama_stack.core.datatypes import AuthenticationRequiredError
-from llama_stack.core.server.server import translate_exception
+from llama_stack.core.server.server import remove_disabled_providers, translate_exception
class TestTranslateException:
@@ -194,3 +194,70 @@ class TestTranslateException:
assert isinstance(result3, HTTPException)
assert result3.status_code == 403
assert result3.detail == "Permission denied: Access denied"
+
+
+class TestRemoveDisabledProviders:
+ """Test cases for the remove_disabled_providers function."""
+
+ def test_remove_explicitly_disabled_provider(self):
+ """Test that providers with provider_id='__disabled__' are removed."""
+ config = {
+ "providers": {
+ "inference": [
+ {"provider_id": "openai", "provider_type": "remote::openai", "config": {}},
+ {"provider_id": "__disabled__", "provider_type": "remote::vllm", "config": {}},
+ ]
+ }
+ }
+ result = remove_disabled_providers(config)
+ assert len(result["providers"]["inference"]) == 1
+ assert result["providers"]["inference"][0]["provider_id"] == "openai"
+
+ def test_remove_empty_provider_id(self):
+ """Test that providers with empty provider_id are removed."""
+ config = {
+ "providers": {
+ "inference": [
+ {"provider_id": "openai", "provider_type": "remote::openai", "config": {}},
+ {"provider_id": "", "provider_type": "remote::vllm", "config": {}},
+ ]
+ }
+ }
+ result = remove_disabled_providers(config)
+ assert len(result["providers"]["inference"]) == 1
+ assert result["providers"]["inference"][0]["provider_id"] == "openai"
+
+ def test_keep_models_with_none_provider_model_id(self):
+ """Test that models with None provider_model_id are NOT removed."""
+ config = {
+ "registered_resources": {
+ "models": [
+ {
+ "model_id": "llama-3-2-3b",
+ "provider_id": "vllm-inference",
+ "model_type": "llm",
+ "provider_model_id": None,
+ "metadata": {},
+ },
+ {
+ "model_id": "gpt-4o-mini",
+ "provider_id": "openai",
+ "model_type": "llm",
+ "provider_model_id": None,
+ "metadata": {},
+ },
+ {
+ "model_id": "granite-embedding-125m",
+ "provider_id": "sentence-transformers",
+ "model_type": "embedding",
+ "provider_model_id": "ibm-granite/granite-embedding-125m-english",
+ "metadata": {"embedding_dimension": 768},
+ },
+ ]
+ }
+ }
+ result = remove_disabled_providers(config)
+ assert len(result["registered_resources"]["models"]) == 3
+ assert result["registered_resources"]["models"][0]["model_id"] == "llama-3-2-3b"
+ assert result["registered_resources"]["models"][1]["model_id"] == "gpt-4o-mini"
+ assert result["registered_resources"]["models"][2]["model_id"] == "granite-embedding-125m"
diff --git a/tests/unit/server/test_sse.py b/tests/unit/server/test_sse.py
index f36c8c181..d82743c80 100644
--- a/tests/unit/server/test_sse.py
+++ b/tests/unit/server/test_sse.py
@@ -10,8 +10,8 @@ from unittest.mock import AsyncMock, MagicMock
import pytest
-from llama_stack.apis.common.responses import PaginatedResponse
from llama_stack.core.server.server import create_dynamic_typed_route, create_sse_event, sse_generator
+from llama_stack_api import PaginatedResponse
@pytest.fixture
@@ -104,12 +104,18 @@ async def test_paginated_response_url_setting():
route_handler = create_dynamic_typed_route(mock_api_method, "get", "/test/route")
- # Mock minimal request
+ # Mock minimal request with proper state object
request = MagicMock()
request.scope = {"user_attributes": {}, "principal": ""}
request.headers = {}
request.body = AsyncMock(return_value=b"")
+ # Create a simple state object without auto-generating attributes
+ class MockState:
+ pass
+
+ request.state = MockState()
+
result = await route_handler(request)
assert isinstance(result, PaginatedResponse)
diff --git a/tests/unit/tools/test_tools_json_schema.py b/tests/unit/tools/test_tools_json_schema.py
index 8fe3103bc..623955984 100644
--- a/tests/unit/tools/test_tools_json_schema.py
+++ b/tests/unit/tools/test_tools_json_schema.py
@@ -11,8 +11,8 @@ Tests the new input_schema and output_schema fields.
from pydantic import ValidationError
-from llama_stack.apis.tools import ToolDef
from llama_stack.models.llama.datatypes import BuiltinTool, ToolDefinition
+from llama_stack_api import ToolDef
class TestToolDefValidation:
diff --git a/tests/unit/utils/inference/test_inference_store.py b/tests/unit/utils/inference/test_inference_store.py
index d2de1c759..22d4ec1e5 100644
--- a/tests/unit/utils/inference/test_inference_store.py
+++ b/tests/unit/utils/inference/test_inference_store.py
@@ -8,16 +8,16 @@ import time
import pytest
-from llama_stack.apis.inference import (
+from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack.providers.utils.inference.inference_store import InferenceStore
+from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChoice,
OpenAIUserMessageParam,
Order,
)
-from llama_stack.core.storage.datatypes import InferenceStoreReference, SqliteSqlStoreConfig
-from llama_stack.providers.utils.inference.inference_store import InferenceStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
@pytest.fixture(autouse=True)
diff --git a/tests/unit/utils/kvstore/test_sqlite_memory.py b/tests/unit/utils/kvstore/test_sqlite_memory.py
index a31377306..1aaf57b44 100644
--- a/tests/unit/utils/kvstore/test_sqlite_memory.py
+++ b/tests/unit/utils/kvstore/test_sqlite_memory.py
@@ -5,8 +5,8 @@
# the root directory of this source tree.
-from llama_stack.providers.utils.kvstore.config import SqliteKVStoreConfig
-from llama_stack.providers.utils.kvstore.sqlite.sqlite import SqliteKVStoreImpl
+from llama_stack.core.storage.kvstore.config import SqliteKVStoreConfig
+from llama_stack.core.storage.kvstore.sqlite.sqlite import SqliteKVStoreImpl
async def test_memory_kvstore_persistence_behavior():
diff --git a/tests/unit/utils/responses/test_responses_store.py b/tests/unit/utils/responses/test_responses_store.py
index 34cff3d3f..a71fb39f6 100644
--- a/tests/unit/utils/responses/test_responses_store.py
+++ b/tests/unit/utils/responses/test_responses_store.py
@@ -10,15 +10,10 @@ from uuid import uuid4
import pytest
-from llama_stack.apis.agents import Order
-from llama_stack.apis.agents.openai_responses import (
- OpenAIResponseInput,
- OpenAIResponseObject,
-)
-from llama_stack.apis.inference import OpenAIMessageParam, OpenAIUserMessageParam
from llama_stack.core.storage.datatypes import ResponsesStoreReference, SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlstore import register_sqlstore_backends
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
-from llama_stack.providers.utils.sqlstore.sqlstore import register_sqlstore_backends
+from llama_stack_api import OpenAIMessageParam, OpenAIResponseInput, OpenAIResponseObject, OpenAIUserMessageParam, Order
def build_store(db_path: str, policy: list | None = None) -> ResponsesStore:
@@ -46,7 +41,7 @@ def create_test_response_object(
def create_test_response_input(content: str, input_id: str) -> OpenAIResponseInput:
"""Helper to create a test response input."""
- from llama_stack.apis.agents.openai_responses import OpenAIResponseMessage
+ from llama_stack_api import OpenAIResponseMessage
return OpenAIResponseMessage(
id=input_id,
diff --git a/tests/unit/utils/sqlstore/test_sqlstore.py b/tests/unit/utils/sqlstore/test_sqlstore.py
index 00669b698..421e3b69d 100644
--- a/tests/unit/utils/sqlstore/test_sqlstore.py
+++ b/tests/unit/utils/sqlstore/test_sqlstore.py
@@ -9,9 +9,9 @@ from tempfile import TemporaryDirectory
import pytest
-from llama_stack.providers.utils.sqlstore.api import ColumnType
-from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType
async def test_sqlite_sqlstore():
@@ -65,6 +65,38 @@ async def test_sqlite_sqlstore():
assert result.has_more is False
+async def test_sqlstore_upsert_support():
+ with TemporaryDirectory() as tmp_dir:
+ db_path = tmp_dir + "/upsert.db"
+ store = SqlAlchemySqlStoreImpl(SqliteSqlStoreConfig(db_path=db_path))
+
+ await store.create_table(
+ "items",
+ {
+ "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True),
+ "value": ColumnType.STRING,
+ "updated_at": ColumnType.INTEGER,
+ },
+ )
+
+ await store.upsert(
+ table="items",
+ data={"id": "item_1", "value": "first", "updated_at": 1},
+ conflict_columns=["id"],
+ )
+ row = await store.fetch_one("items", {"id": "item_1"})
+ assert row == {"id": "item_1", "value": "first", "updated_at": 1}
+
+ await store.upsert(
+ table="items",
+ data={"id": "item_1", "value": "second", "updated_at": 2},
+ conflict_columns=["id"],
+ update_columns=["value", "updated_at"],
+ )
+ row = await store.fetch_one("items", {"id": "item_1"})
+ assert row == {"id": "item_1", "value": "second", "updated_at": 2}
+
+
async def test_sqlstore_pagination_basic():
"""Test basic pagination functionality at the SQL store level."""
with TemporaryDirectory() as tmp_dir:
diff --git a/tests/unit/utils/test_authorized_sqlstore.py b/tests/unit/utils/test_authorized_sqlstore.py
index d85e784a9..e9a6b511b 100644
--- a/tests/unit/utils/test_authorized_sqlstore.py
+++ b/tests/unit/utils/test_authorized_sqlstore.py
@@ -10,13 +10,13 @@ from unittest.mock import patch
from llama_stack.core.access_control.access_control import default_policy, is_action_allowed
from llama_stack.core.access_control.datatypes import Action
from llama_stack.core.datatypes import User
-from llama_stack.providers.utils.sqlstore.api import ColumnType
-from llama_stack.providers.utils.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
-from llama_stack.providers.utils.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
-from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack.core.storage.sqlstore.authorized_sqlstore import AuthorizedSqlStore, SqlRecord
+from llama_stack.core.storage.sqlstore.sqlalchemy_sqlstore import SqlAlchemySqlStoreImpl
+from llama_stack.core.storage.sqlstore.sqlstore import SqliteSqlStoreConfig
+from llama_stack_api.internal.sqlstore import ColumnType
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_fetch_with_where_sql_access_control(mock_get_authenticated_user):
"""Test that fetch_all works correctly with where_sql for access control"""
with TemporaryDirectory() as tmp_dir:
@@ -78,7 +78,7 @@ async def test_authorized_fetch_with_where_sql_access_control(mock_get_authentic
assert row["title"] == "User Document"
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_sql_policy_consistency(mock_get_authenticated_user):
"""Test that SQL WHERE clause logic exactly matches is_action_allowed policy logic"""
with TemporaryDirectory() as tmp_dir:
@@ -164,7 +164,7 @@ async def test_sql_policy_consistency(mock_get_authenticated_user):
)
-@patch("llama_stack.providers.utils.sqlstore.authorized_sqlstore.get_authenticated_user")
+@patch("llama_stack.core.storage.sqlstore.authorized_sqlstore.get_authenticated_user")
async def test_authorized_store_user_attribute_capture(mock_get_authenticated_user):
"""Test that user attributes are properly captured during insert"""
with TemporaryDirectory() as tmp_dir:
diff --git a/uv.lock b/uv.lock
index ba9a862a3..8c648c362 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
version = 1
-revision = 3
+revision = 2
requires-python = ">=3.12"
resolution-markers = [
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
@@ -139,6 +139,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c", size = 731200, upload-time = "2024-11-23T23:39:56.4Z" },
]
+[[package]]
+name = "annotated-doc"
+version = "0.0.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
+]
+
[[package]]
name = "annotated-types"
version = "0.7.0"
@@ -1037,16 +1046,17 @@ wheels = [
[[package]]
name = "fastapi"
-version = "0.119.0"
+version = "0.121.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
+ { name = "annotated-doc" },
{ name = "pydantic" },
{ name = "starlette" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/0a/f9/5c5bcce82a7997cc0eb8c47b7800f862f6b56adc40486ed246e5010d443b/fastapi-0.119.0.tar.gz", hash = "sha256:451082403a2c1f0b99c6bd57c09110ed5463856804c8078d38e5a1f1035dbbb7", size = 336756, upload-time = "2025-10-11T17:13:40.53Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/fb/48/f08f264da34cf160db82c62ffb335e838b1fc16cbcc905f474c7d4c815db/fastapi-0.121.2.tar.gz", hash = "sha256:ca8e932b2b823ec1721c641e3669472c855ad9564a2854c9899d904c2848b8b9", size = 342944, upload-time = "2025-11-13T17:05:54.692Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/ce/70/584c4d7cad80f5e833715c0a29962d7c93b4d18eed522a02981a6d1b6ee5/fastapi-0.119.0-py3-none-any.whl", hash = "sha256:90a2e49ed19515320abb864df570dd766be0662c5d577688f1600170f7f73cf2", size = 107095, upload-time = "2025-10-11T17:13:39.048Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/23/dfb161e91db7c92727db505dc72a384ee79681fe0603f706f9f9f52c2901/fastapi-0.121.2-py3-none-any.whl", hash = "sha256:f2d80b49a86a846b70cc3a03eb5ea6ad2939298bf6a7fe377aa9cd3dd079d358", size = 109201, upload-time = "2025-11-13T17:05:52.718Z" },
]
[[package]]
@@ -1824,6 +1834,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/54/c86cd8e011fe98803d7e382fd67c0df5ceab8d2b7ad8c5a81524f791551c/jsonschema-4.25.0-py3-none-any.whl", hash = "sha256:24c2e8da302de79c8b9382fee3e76b355e44d2a4364bb207159ce10b517bd716", size = 89184, upload-time = "2025-07-18T15:39:42.956Z" },
]
+[[package]]
+name = "jsonschema-path"
+version = "0.3.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pathable" },
+ { name = "pyyaml" },
+ { name = "referencing" },
+ { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6e/45/41ebc679c2a4fced6a722f624c18d658dee42612b83ea24c1caf7c0eb3a8/jsonschema_path-0.3.4.tar.gz", hash = "sha256:8365356039f16cc65fddffafda5f58766e34bebab7d6d105616ab52bc4297001", size = 11159, upload-time = "2025-01-24T14:33:16.547Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cb/58/3485da8cb93d2f393bce453adeef16896751f14ba3e2024bc21dc9597646/jsonschema_path-0.3.4-py3-none-any.whl", hash = "sha256:f502191fdc2b22050f9a81c9237be9d27145b9001c55842bece5e94e382e52f8", size = 14810, upload-time = "2025-01-24T14:33:14.652Z" },
+]
+
[[package]]
name = "jsonschema-specifications"
version = "2025.4.1"
@@ -1903,6 +1928,38 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/89/43/d9bebfc3db7dea6ec80df5cb2aad8d274dd18ec2edd6c4f21f32c237cbbb/kubernetes-33.1.0-py2.py3-none-any.whl", hash = "sha256:544de42b24b64287f7e0aa9513c93cb503f7f40eea39b20f66810011a86eabc5", size = 1941335, upload-time = "2025-06-09T21:57:56.327Z" },
]
+[[package]]
+name = "lazy-object-proxy"
+version = "1.12.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/08/a2/69df9c6ba6d316cfd81fe2381e464db3e6de5db45f8c43c6a23504abf8cb/lazy_object_proxy-1.12.0.tar.gz", hash = "sha256:1f5a462d92fd0cfb82f1fab28b51bfb209fabbe6aabf7f0d51472c0c124c0c61", size = 43681, upload-time = "2025-08-22T13:50:06.783Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0d/1b/b5f5bd6bda26f1e15cd3232b223892e4498e34ec70a7f4f11c401ac969f1/lazy_object_proxy-1.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ee0d6027b760a11cc18281e702c0309dd92da458a74b4c15025d7fc490deede", size = 26746, upload-time = "2025-08-22T13:42:37.572Z" },
+ { url = "https://files.pythonhosted.org/packages/55/64/314889b618075c2bfc19293ffa9153ce880ac6153aacfd0a52fcabf21a66/lazy_object_proxy-1.12.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4ab2c584e3cc8be0dfca422e05ad30a9abe3555ce63e9ab7a559f62f8dbc6ff9", size = 71457, upload-time = "2025-08-22T13:42:38.743Z" },
+ { url = "https://files.pythonhosted.org/packages/11/53/857fc2827fc1e13fbdfc0ba2629a7d2579645a06192d5461809540b78913/lazy_object_proxy-1.12.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:14e348185adbd03ec17d051e169ec45686dcd840a3779c9d4c10aabe2ca6e1c0", size = 71036, upload-time = "2025-08-22T13:42:40.184Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/24/e581ffed864cd33c1b445b5763d617448ebb880f48675fc9de0471a95cbc/lazy_object_proxy-1.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c4fcbe74fb85df8ba7825fa05eddca764138da752904b378f0ae5ab33a36c308", size = 69329, upload-time = "2025-08-22T13:42:41.311Z" },
+ { url = "https://files.pythonhosted.org/packages/78/be/15f8f5a0b0b2e668e756a152257d26370132c97f2f1943329b08f057eff0/lazy_object_proxy-1.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:563d2ec8e4d4b68ee7848c5ab4d6057a6d703cb7963b342968bb8758dda33a23", size = 70690, upload-time = "2025-08-22T13:42:42.51Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/aa/f02be9bbfb270e13ee608c2b28b8771f20a5f64356c6d9317b20043c6129/lazy_object_proxy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:53c7fd99eb156bbb82cbc5d5188891d8fdd805ba6c1e3b92b90092da2a837073", size = 26563, upload-time = "2025-08-22T13:42:43.685Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/26/b74c791008841f8ad896c7f293415136c66cc27e7c7577de4ee68040c110/lazy_object_proxy-1.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:86fd61cb2ba249b9f436d789d1356deae69ad3231dc3c0f17293ac535162672e", size = 26745, upload-time = "2025-08-22T13:42:44.982Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/52/641870d309e5d1fb1ea7d462a818ca727e43bfa431d8c34b173eb090348c/lazy_object_proxy-1.12.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:81d1852fb30fab81696f93db1b1e55a5d1ff7940838191062f5f56987d5fcc3e", size = 71537, upload-time = "2025-08-22T13:42:46.141Z" },
+ { url = "https://files.pythonhosted.org/packages/47/b6/919118e99d51c5e76e8bf5a27df406884921c0acf2c7b8a3b38d847ab3e9/lazy_object_proxy-1.12.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be9045646d83f6c2664c1330904b245ae2371b5c57a3195e4028aedc9f999655", size = 71141, upload-time = "2025-08-22T13:42:47.375Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/47/1d20e626567b41de085cf4d4fb3661a56c159feaa73c825917b3b4d4f806/lazy_object_proxy-1.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:67f07ab742f1adfb3966c40f630baaa7902be4222a17941f3d85fd1dae5565ff", size = 69449, upload-time = "2025-08-22T13:42:48.49Z" },
+ { url = "https://files.pythonhosted.org/packages/58/8d/25c20ff1a1a8426d9af2d0b6f29f6388005fc8cd10d6ee71f48bff86fdd0/lazy_object_proxy-1.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:75ba769017b944fcacbf6a80c18b2761a1795b03f8899acdad1f1c39db4409be", size = 70744, upload-time = "2025-08-22T13:42:49.608Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/67/8ec9abe15c4f8a4bcc6e65160a2c667240d025cbb6591b879bea55625263/lazy_object_proxy-1.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:7b22c2bbfb155706b928ac4d74c1a63ac8552a55ba7fff4445155523ea4067e1", size = 26568, upload-time = "2025-08-22T13:42:57.719Z" },
+ { url = "https://files.pythonhosted.org/packages/23/12/cd2235463f3469fd6c62d41d92b7f120e8134f76e52421413a0ad16d493e/lazy_object_proxy-1.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4a79b909aa16bde8ae606f06e6bbc9d3219d2e57fb3e0076e17879072b742c65", size = 27391, upload-time = "2025-08-22T13:42:50.62Z" },
+ { url = "https://files.pythonhosted.org/packages/60/9e/f1c53e39bbebad2e8609c67d0830cc275f694d0ea23d78e8f6db526c12d3/lazy_object_proxy-1.12.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:338ab2f132276203e404951205fe80c3fd59429b3a724e7b662b2eb539bb1be9", size = 80552, upload-time = "2025-08-22T13:42:51.731Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/b6/6c513693448dcb317d9d8c91d91f47addc09553613379e504435b4cc8b3e/lazy_object_proxy-1.12.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c40b3c9faee2e32bfce0df4ae63f4e73529766893258eca78548bac801c8f66", size = 82857, upload-time = "2025-08-22T13:42:53.225Z" },
+ { url = "https://files.pythonhosted.org/packages/12/1c/d9c4aaa4c75da11eb7c22c43d7c90a53b4fca0e27784a5ab207768debea7/lazy_object_proxy-1.12.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:717484c309df78cedf48396e420fa57fc8a2b1f06ea889df7248fdd156e58847", size = 80833, upload-time = "2025-08-22T13:42:54.391Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/ae/29117275aac7d7d78ae4f5a4787f36ff33262499d486ac0bf3e0b97889f6/lazy_object_proxy-1.12.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a6b7ea5ea1ffe15059eb44bcbcb258f97bcb40e139b88152c40d07b1a1dfc9ac", size = 79516, upload-time = "2025-08-22T13:42:55.812Z" },
+ { url = "https://files.pythonhosted.org/packages/19/40/b4e48b2c38c69392ae702ae7afa7b6551e0ca5d38263198b7c79de8b3bdf/lazy_object_proxy-1.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:08c465fb5cd23527512f9bd7b4c7ba6cec33e28aad36fbbe46bf7b858f9f3f7f", size = 27656, upload-time = "2025-08-22T13:42:56.793Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/3a/277857b51ae419a1574557c0b12e0d06bf327b758ba94cafc664cb1e2f66/lazy_object_proxy-1.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c9defba70ab943f1df98a656247966d7729da2fe9c2d5d85346464bf320820a3", size = 26582, upload-time = "2025-08-22T13:49:49.366Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/b6/c5e0fa43535bb9c87880e0ba037cdb1c50e01850b0831e80eb4f4762f270/lazy_object_proxy-1.12.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6763941dbf97eea6b90f5b06eb4da9418cc088fce0e3883f5816090f9afcde4a", size = 71059, upload-time = "2025-08-22T13:49:50.488Z" },
+ { url = "https://files.pythonhosted.org/packages/06/8a/7dcad19c685963c652624702f1a968ff10220b16bfcc442257038216bf55/lazy_object_proxy-1.12.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fdc70d81235fc586b9e3d1aeef7d1553259b62ecaae9db2167a5d2550dcc391a", size = 71034, upload-time = "2025-08-22T13:49:54.224Z" },
+ { url = "https://files.pythonhosted.org/packages/12/ac/34cbfb433a10e28c7fd830f91c5a348462ba748413cbb950c7f259e67aa7/lazy_object_proxy-1.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0a83c6f7a6b2bfc11ef3ed67f8cbe99f8ff500b05655d8e7df9aab993a6abc95", size = 69529, upload-time = "2025-08-22T13:49:55.29Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/6a/11ad7e349307c3ca4c0175db7a77d60ce42a41c60bcb11800aabd6a8acb8/lazy_object_proxy-1.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:256262384ebd2a77b023ad02fbcc9326282bcfd16484d5531154b02bc304f4c5", size = 70391, upload-time = "2025-08-22T13:49:56.35Z" },
+ { url = "https://files.pythonhosted.org/packages/59/97/9b410ed8fbc6e79c1ee8b13f8777a80137d4bc189caf2c6202358e66192c/lazy_object_proxy-1.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:7601ec171c7e8584f8ff3f4e440aa2eebf93e854f04639263875b8c2971f819f", size = 26988, upload-time = "2025-08-22T13:49:57.302Z" },
+]
+
[[package]]
name = "linkify"
version = "1.4"
@@ -1945,6 +2002,7 @@ dependencies = [
{ name = "httpx" },
{ name = "jinja2" },
{ name = "jsonschema" },
+ { name = "llama-stack-api" },
{ name = "openai" },
{ name = "opentelemetry-exporter-otlp-proto-http" },
{ name = "opentelemetry-sdk" },
@@ -1981,6 +2039,7 @@ dev = [
{ name = "black" },
{ name = "mypy" },
{ name = "nbval" },
+ { name = "openapi-spec-validator" },
{ name = "pre-commit" },
{ name = "pytest" },
{ name = "pytest-asyncio" },
@@ -2094,6 +2153,7 @@ requires-dist = [
{ name = "httpx" },
{ name = "jinja2", specifier = ">=3.1.6" },
{ name = "jsonschema" },
+ { name = "llama-stack-api", editable = "src/llama_stack_api" },
{ name = "llama-stack-client", marker = "extra == 'client'", specifier = ">=0.3.0" },
{ name = "openai", specifier = ">=2.5.0" },
{ name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
@@ -2107,7 +2167,7 @@ requires-dist = [
{ name = "pyyaml", specifier = ">=6.0" },
{ name = "rich" },
{ name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.41" },
- { name = "starlette" },
+ { name = "starlette", specifier = ">=0.49.1" },
{ name = "termcolor" },
{ name = "tiktoken" },
{ name = "uvicorn", specifier = ">=0.34.0" },
@@ -2125,7 +2185,8 @@ dev = [
{ name = "black" },
{ name = "mypy" },
{ name = "nbval" },
- { name = "pre-commit" },
+ { name = "openapi-spec-validator", specifier = ">=0.7.2" },
+ { name = "pre-commit", specifier = ">=4.4.0" },
{ name = "pytest", specifier = ">=8.4" },
{ name = "pytest-asyncio", specifier = ">=1.0" },
{ name = "pytest-cov" },
@@ -2166,7 +2227,7 @@ test = [
{ name = "milvus-lite", specifier = ">=2.5.0" },
{ name = "psycopg2-binary", specifier = ">=2.9.0" },
{ name = "pymilvus", specifier = ">=2.6.1" },
- { name = "pypdf" },
+ { name = "pypdf", specifier = ">=6.1.3" },
{ name = "qdrant-client" },
{ name = "requests" },
{ name = "sqlalchemy" },
@@ -2219,13 +2280,32 @@ unit = [
{ name = "moto", extras = ["s3"], specifier = ">=5.1.10" },
{ name = "ollama" },
{ name = "psycopg2-binary", specifier = ">=2.9.0" },
- { name = "pypdf" },
+ { name = "pypdf", specifier = ">=6.1.3" },
{ name = "sqlalchemy" },
{ name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.41" },
{ name = "sqlite-vec" },
{ name = "together" },
]
+[[package]]
+name = "llama-stack-api"
+version = "0.4.0.dev0"
+source = { editable = "src/llama_stack_api" }
+dependencies = [
+ { name = "jsonschema" },
+ { name = "opentelemetry-exporter-otlp-proto-http" },
+ { name = "opentelemetry-sdk" },
+ { name = "pydantic" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "jsonschema" },
+ { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" },
+ { name = "opentelemetry-sdk", specifier = ">=1.30.0" },
+ { name = "pydantic", specifier = ">=2.11.9" },
+]
+
[[package]]
name = "llama-stack-client"
version = "0.3.0"
@@ -2979,6 +3059,35 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/14/f3/ebbd700d8dc1e6380a7a382969d96bc0cbea8717b52fb38ff0ca2a7653e8/openai-2.5.0-py3-none-any.whl", hash = "sha256:21380e5f52a71666dbadbf322dd518bdf2b9d11ed0bb3f96bea17310302d6280", size = 999851, upload-time = "2025-10-17T18:14:45.528Z" },
]
+[[package]]
+name = "openapi-schema-validator"
+version = "0.6.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jsonschema" },
+ { name = "jsonschema-specifications" },
+ { name = "rfc3339-validator" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8b/f3/5507ad3325169347cd8ced61c232ff3df70e2b250c49f0fe140edb4973c6/openapi_schema_validator-0.6.3.tar.gz", hash = "sha256:f37bace4fc2a5d96692f4f8b31dc0f8d7400fd04f3a937798eaf880d425de6ee", size = 11550, upload-time = "2025-01-10T18:08:22.268Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/21/c6/ad0fba32775ae749016829dace42ed80f4407b171da41313d1a3a5f102e4/openapi_schema_validator-0.6.3-py3-none-any.whl", hash = "sha256:f3b9870f4e556b5a62a1c39da72a6b4b16f3ad9c73dc80084b1b11e74ba148a3", size = 8755, upload-time = "2025-01-10T18:08:19.758Z" },
+]
+
+[[package]]
+name = "openapi-spec-validator"
+version = "0.7.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jsonschema" },
+ { name = "jsonschema-path" },
+ { name = "lazy-object-proxy" },
+ { name = "openapi-schema-validator" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/82/af/fe2d7618d6eae6fb3a82766a44ed87cd8d6d82b4564ed1c7cfb0f6378e91/openapi_spec_validator-0.7.2.tar.gz", hash = "sha256:cc029309b5c5dbc7859df0372d55e9d1ff43e96d678b9ba087f7c56fc586f734", size = 36855, upload-time = "2025-06-07T14:48:56.299Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/27/dd/b3fd642260cb17532f66cc1e8250f3507d1e580483e209dc1e9d13bd980d/openapi_spec_validator-0.7.2-py3-none-any.whl", hash = "sha256:4bbdc0894ec85f1d1bea1d6d9c8b2c3c8d7ccaa13577ef40da9c006c9fd0eb60", size = 39713, upload-time = "2025-06-07T14:48:54.077Z" },
+]
+
[[package]]
name = "opentelemetry-api"
version = "1.36.0"
@@ -3215,6 +3324,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650, upload-time = "2024-04-05T09:43:53.299Z" },
]
+[[package]]
+name = "pathable"
+version = "0.4.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/67/93/8f2c2075b180c12c1e9f6a09d1a985bc2036906b13dff1d8917e395f2048/pathable-0.4.4.tar.gz", hash = "sha256:6905a3cd17804edfac7875b5f6c9142a218c7caef78693c2dbbbfbac186d88b2", size = 8124, upload-time = "2025-01-10T18:43:13.247Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7d/eb/b6260b31b1a96386c0a880edebe26f89669098acea8e0318bff6adb378fd/pathable-0.4.4-py3-none-any.whl", hash = "sha256:5ae9e94793b6ef5a4cbe0a7ce9dbbefc1eec38df253763fd0aeeacf2762dbbc2", size = 9592, upload-time = "2025-01-10T18:43:11.88Z" },
+]
+
[[package]]
name = "pathspec"
version = "0.12.1"
@@ -3403,7 +3521,7 @@ wheels = [
[[package]]
name = "pre-commit"
-version = "4.2.0"
+version = "4.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "cfgv" },
@@ -3412,9 +3530,9 @@ dependencies = [
{ name = "pyyaml" },
{ name = "virtualenv" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/08/39/679ca9b26c7bb2999ff122d50faa301e49af82ca9c066ec061cfbc0c6784/pre_commit-4.2.0.tar.gz", hash = "sha256:601283b9757afd87d40c4c4a9b2b5de9637a8ea02eaff7adc2d0fb4e04841146", size = 193424, upload-time = "2025-03-18T21:35:20.987Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a6/49/7845c2d7bf6474efd8e27905b51b11e6ce411708c91e829b93f324de9929/pre_commit-4.4.0.tar.gz", hash = "sha256:f0233ebab440e9f17cabbb558706eb173d19ace965c68cdce2c081042b4fab15", size = 197501, upload-time = "2025-11-08T21:12:11.607Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/88/74/a88bf1b1efeae488a0c0b7bdf71429c313722d1fc0f377537fbe554e6180/pre_commit-4.2.0-py2.py3-none-any.whl", hash = "sha256:a009ca7205f1eb497d10b845e52c838a98b6cdd2102a6c8e4540e94ee75c58bd", size = 220707, upload-time = "2025-03-18T21:35:19.343Z" },
+ { url = "https://files.pythonhosted.org/packages/27/11/574fe7d13acf30bfd0a8dd7fa1647040f2b8064f13f43e8c963b1e65093b/pre_commit-4.4.0-py2.py3-none-any.whl", hash = "sha256:b35ea52957cbf83dcc5d8ee636cbead8624e3a15fbfa61a370e42158ac8a5813", size = 226049, upload-time = "2025-11-08T21:12:10.228Z" },
]
[[package]]
@@ -3973,11 +4091,11 @@ wheels = [
[[package]]
name = "pypdf"
-version = "5.9.0"
+version = "6.2.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/89/3a/584b97a228950ed85aec97c811c68473d9b8d149e6a8c155668287cf1a28/pypdf-5.9.0.tar.gz", hash = "sha256:30f67a614d558e495e1fbb157ba58c1de91ffc1718f5e0dfeb82a029233890a1", size = 5035118, upload-time = "2025-07-27T14:04:52.364Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/4e/2b/8795ec0378384000b0a37a2b5e6d67fa3d84802945aa2c612a78a784d7d4/pypdf-6.2.0.tar.gz", hash = "sha256:46b4d8495d68ae9c818e7964853cd9984e6a04c19fe7112760195395992dce48", size = 5272001, upload-time = "2025-11-09T11:10:41.911Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/48/d9/6cff57c80a6963e7dd183bf09e9f21604a77716644b1e580e97b259f7612/pypdf-5.9.0-py3-none-any.whl", hash = "sha256:be10a4c54202f46d9daceaa8788be07aa8cd5ea8c25c529c50dd509206382c35", size = 313193, upload-time = "2025-07-27T14:04:50.53Z" },
+ { url = "https://files.pythonhosted.org/packages/de/ba/743ddcaf1a8fb439342399645921e2cf2c600464cba5531a11f1cc0822b6/pypdf-6.2.0-py3-none-any.whl", hash = "sha256:4c0f3e62677217a777ab79abe22bf1285442d70efabf552f61c7a03b6f5c569f", size = 326592, upload-time = "2025-11-09T11:10:39.941Z" },
]
[[package]]
@@ -4372,6 +4490,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/1c/4c/cc276ce57e572c102d9542d383b2cfd551276581dc60004cb94fe8774c11/responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c", size = 34769, upload-time = "2025-08-08T19:01:45.018Z" },
]
+[[package]]
+name = "rfc3339-validator"
+version = "0.1.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/28/ea/a9387748e2d111c3c2b275ba970b735e04e15cdb1eb30693b6b5708c4dbd/rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b", size = 5513, upload-time = "2021-05-12T16:37:54.178Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7b/44/4e421b96b67b2daff264473f7465db72fbdf36a07e05494f50300cc7b0c6/rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa", size = 3490, upload-time = "2021-05-12T16:37:52.536Z" },
+]
+
[[package]]
name = "rich"
version = "14.1.0"
@@ -4484,40 +4614,48 @@ wheels = [
[[package]]
name = "ruamel-yaml"
-version = "0.18.14"
+version = "0.18.16"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "ruamel-yaml-clib", marker = "python_full_version < '3.14' and platform_python_implementation == 'CPython'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/39/87/6da0df742a4684263261c253f00edd5829e6aca970fff69e75028cccc547/ruamel.yaml-0.18.14.tar.gz", hash = "sha256:7227b76aaec364df15936730efbf7d72b30c0b79b1d578bbb8e3dcb2d81f52b7", size = 145511, upload-time = "2025-06-09T08:51:09.828Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/9f/c7/ee630b29e04a672ecfc9b63227c87fd7a37eb67c1bf30fe95376437f897c/ruamel.yaml-0.18.16.tar.gz", hash = "sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a", size = 147269, upload-time = "2025-10-22T17:54:02.346Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/af/6d/6fe4805235e193aad4aaf979160dd1f3c487c57d48b810c816e6e842171b/ruamel.yaml-0.18.14-py3-none-any.whl", hash = "sha256:710ff198bb53da66718c7db27eec4fbcc9aa6ca7204e4c1df2f282b6fe5eb6b2", size = 118570, upload-time = "2025-06-09T08:51:06.348Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/73/bb1bc2529f852e7bf64a2dec885e89ff9f5cc7bbf6c9340eed30ff2c69c5/ruamel.yaml-0.18.16-py3-none-any.whl", hash = "sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba", size = 119858, upload-time = "2025-10-22T17:53:59.012Z" },
]
[[package]]
name = "ruamel-yaml-clib"
-version = "0.2.12"
+version = "0.2.14"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/20/84/80203abff8ea4993a87d823a5f632e4d92831ef75d404c9fc78d0176d2b5/ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f", size = 225315, upload-time = "2024-10-20T10:10:56.22Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/e9/39ec4d4b3f91188fad1842748f67d4e749c77c37e353c4e545052ee8e893/ruamel.yaml.clib-0.2.14.tar.gz", hash = "sha256:803f5044b13602d58ea378576dd75aa759f52116a0232608e8fdada4da33752e", size = 225394, upload-time = "2025-09-22T19:51:23.753Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/48/41/e7a405afbdc26af961678474a55373e1b323605a4f5e2ddd4a80ea80f628/ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632", size = 133433, upload-time = "2024-10-20T10:12:55.657Z" },
- { url = "https://files.pythonhosted.org/packages/ec/b0/b850385604334c2ce90e3ee1013bd911aedf058a934905863a6ea95e9eb4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d", size = 647362, upload-time = "2024-10-20T10:12:57.155Z" },
- { url = "https://files.pythonhosted.org/packages/44/d0/3f68a86e006448fb6c005aee66565b9eb89014a70c491d70c08de597f8e4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c", size = 754118, upload-time = "2024-10-20T10:12:58.501Z" },
- { url = "https://files.pythonhosted.org/packages/52/a9/d39f3c5ada0a3bb2870d7db41901125dbe2434fa4f12ca8c5b83a42d7c53/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd", size = 706497, upload-time = "2024-10-20T10:13:00.211Z" },
- { url = "https://files.pythonhosted.org/packages/b0/fa/097e38135dadd9ac25aecf2a54be17ddf6e4c23e43d538492a90ab3d71c6/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31", size = 698042, upload-time = "2024-10-21T11:26:46.038Z" },
- { url = "https://files.pythonhosted.org/packages/ec/d5/a659ca6f503b9379b930f13bc6b130c9f176469b73b9834296822a83a132/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680", size = 745831, upload-time = "2024-10-21T11:26:47.487Z" },
- { url = "https://files.pythonhosted.org/packages/db/5d/36619b61ffa2429eeaefaab4f3374666adf36ad8ac6330d855848d7d36fd/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d", size = 715692, upload-time = "2024-12-11T19:58:17.252Z" },
- { url = "https://files.pythonhosted.org/packages/b1/82/85cb92f15a4231c89b95dfe08b09eb6adca929ef7df7e17ab59902b6f589/ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5", size = 98777, upload-time = "2024-10-20T10:13:01.395Z" },
- { url = "https://files.pythonhosted.org/packages/d7/8f/c3654f6f1ddb75daf3922c3d8fc6005b1ab56671ad56ffb874d908bfa668/ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4", size = 115523, upload-time = "2024-10-20T10:13:02.768Z" },
- { url = "https://files.pythonhosted.org/packages/29/00/4864119668d71a5fa45678f380b5923ff410701565821925c69780356ffa/ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a", size = 132011, upload-time = "2024-10-20T10:13:04.377Z" },
- { url = "https://files.pythonhosted.org/packages/7f/5e/212f473a93ae78c669ffa0cb051e3fee1139cb2d385d2ae1653d64281507/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475", size = 642488, upload-time = "2024-10-20T10:13:05.906Z" },
- { url = "https://files.pythonhosted.org/packages/1f/8f/ecfbe2123ade605c49ef769788f79c38ddb1c8fa81e01f4dbf5cf1a44b16/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef", size = 745066, upload-time = "2024-10-20T10:13:07.26Z" },
- { url = "https://files.pythonhosted.org/packages/e2/a9/28f60726d29dfc01b8decdb385de4ced2ced9faeb37a847bd5cf26836815/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6", size = 701785, upload-time = "2024-10-20T10:13:08.504Z" },
- { url = "https://files.pythonhosted.org/packages/84/7e/8e7ec45920daa7f76046578e4f677a3215fe8f18ee30a9cb7627a19d9b4c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf", size = 693017, upload-time = "2024-10-21T11:26:48.866Z" },
- { url = "https://files.pythonhosted.org/packages/c5/b3/d650eaade4ca225f02a648321e1ab835b9d361c60d51150bac49063b83fa/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1", size = 741270, upload-time = "2024-10-21T11:26:50.213Z" },
- { url = "https://files.pythonhosted.org/packages/87/b8/01c29b924dcbbed75cc45b30c30d565d763b9c4d540545a0eeecffb8f09c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01", size = 709059, upload-time = "2024-12-11T19:58:18.846Z" },
- { url = "https://files.pythonhosted.org/packages/30/8c/ed73f047a73638257aa9377ad356bea4d96125b305c34a28766f4445cc0f/ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6", size = 98583, upload-time = "2024-10-20T10:13:09.658Z" },
- { url = "https://files.pythonhosted.org/packages/b0/85/e8e751d8791564dd333d5d9a4eab0a7a115f7e349595417fd50ecae3395c/ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3", size = 115190, upload-time = "2024-10-20T10:13:10.66Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/42/ccfb34a25289afbbc42017e4d3d4288e61d35b2e00cfc6b92974a6a1f94b/ruamel.yaml.clib-0.2.14-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:6aeadc170090ff1889f0d2c3057557f9cd71f975f17535c26a5d37af98f19c27", size = 271775, upload-time = "2025-09-23T14:24:12.771Z" },
+ { url = "https://files.pythonhosted.org/packages/82/73/e628a92e80197ff6a79ab81ec3fa00d4cc082d58ab78d3337b7ba7043301/ruamel.yaml.clib-0.2.14-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5e56ac47260c0eed992789fa0b8efe43404a9adb608608631a948cee4fc2b052", size = 138842, upload-time = "2025-09-22T19:50:49.156Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/c5/346c7094344a60419764b4b1334d9e0285031c961176ff88ffb652405b0c/ruamel.yaml.clib-0.2.14-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:a911aa73588d9a8b08d662b9484bc0567949529824a55d3885b77e8dd62a127a", size = 647404, upload-time = "2025-09-22T19:50:52.921Z" },
+ { url = "https://files.pythonhosted.org/packages/df/99/65080c863eb06d4498de3d6c86f3e90595e02e159fd8529f1565f56cfe2c/ruamel.yaml.clib-0.2.14-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a05ba88adf3d7189a974b2de7a9d56731548d35dc0a822ec3dc669caa7019b29", size = 753141, upload-time = "2025-09-22T19:50:50.294Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/e3/0de85f3e3333f8e29e4b10244374a202a87665d1131798946ee22cf05c7c/ruamel.yaml.clib-0.2.14-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb04c5650de6668b853623eceadcdb1a9f2fee381f5d7b6bc842ee7c239eeec4", size = 703477, upload-time = "2025-09-22T19:50:51.508Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/25/0d2f09d8833c7fd77ab8efeff213093c16856479a9d293180a0d89f6bed9/ruamel.yaml.clib-0.2.14-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df3ec9959241d07bc261f4983d25a1205ff37703faf42b474f15d54d88b4f8c9", size = 741157, upload-time = "2025-09-23T18:42:50.408Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/8c/959f10c2e2153cbdab834c46e6954b6dd9e3b109c8f8c0a3cf1618310985/ruamel.yaml.clib-0.2.14-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fbc08c02e9b147a11dfcaa1ac8a83168b699863493e183f7c0c8b12850b7d259", size = 745859, upload-time = "2025-09-22T19:50:54.497Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/6b/e580a7c18b485e1a5f30a32cda96b20364b0ba649d9d2baaf72f8bd21f83/ruamel.yaml.clib-0.2.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c099cafc1834d3c5dac305865d04235f7c21c167c8dd31ebc3d6bbc357e2f023", size = 770200, upload-time = "2025-09-22T19:50:55.718Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/44/3455eebc761dc8e8fdced90f2b0a3fa61e32ba38b50de4130e2d57db0f21/ruamel.yaml.clib-0.2.14-cp312-cp312-win32.whl", hash = "sha256:b5b0f7e294700b615a3bcf6d28b26e6da94e8eba63b079f4ec92e9ba6c0d6b54", size = 98829, upload-time = "2025-09-22T19:50:58.895Z" },
+ { url = "https://files.pythonhosted.org/packages/76/ab/5121f7f3b651db93de546f8c982c241397aad0a4765d793aca1dac5eadee/ruamel.yaml.clib-0.2.14-cp312-cp312-win_amd64.whl", hash = "sha256:a37f40a859b503304dd740686359fcf541d6fb3ff7fc10f539af7f7150917c68", size = 115570, upload-time = "2025-09-22T19:50:57.981Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/ae/e3811f05415594025e96000349d3400978adaed88d8f98d494352d9761ee/ruamel.yaml.clib-0.2.14-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7e4f9da7e7549946e02a6122dcad00b7c1168513acb1f8a726b1aaf504a99d32", size = 269205, upload-time = "2025-09-23T14:24:15.06Z" },
+ { url = "https://files.pythonhosted.org/packages/72/06/7d51f4688d6d72bb72fa74254e1593c4f5ebd0036be5b41fe39315b275e9/ruamel.yaml.clib-0.2.14-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:dd7546c851e59c06197a7c651335755e74aa383a835878ca86d2c650c07a2f85", size = 137417, upload-time = "2025-09-22T19:50:59.82Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/08/b4499234a420ef42960eeb05585df5cc7eb25ccb8c980490b079e6367050/ruamel.yaml.clib-0.2.14-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:1c1acc3a0209ea9042cc3cfc0790edd2eddd431a2ec3f8283d081e4d5018571e", size = 642558, upload-time = "2025-09-22T19:51:03.388Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/ba/1975a27dedf1c4c33306ee67c948121be8710b19387aada29e2f139c43ee/ruamel.yaml.clib-0.2.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2070bf0ad1540d5c77a664de07ebcc45eebd1ddcab71a7a06f26936920692beb", size = 744087, upload-time = "2025-09-22T19:51:00.897Z" },
+ { url = "https://files.pythonhosted.org/packages/20/15/8a19a13d27f3bd09fa18813add8380a29115a47b553845f08802959acbce/ruamel.yaml.clib-0.2.14-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd8fe07f49c170e09d76773fb86ad9135e0beee44f36e1576a201b0676d3d1d", size = 699709, upload-time = "2025-09-22T19:51:02.075Z" },
+ { url = "https://files.pythonhosted.org/packages/19/ee/8d6146a079ad21e534b5083c9ee4a4c8bec42f79cf87594b60978286b39a/ruamel.yaml.clib-0.2.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ff86876889ea478b1381089e55cf9e345707b312beda4986f823e1d95e8c0f59", size = 708926, upload-time = "2025-09-23T18:42:51.707Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/f5/426b714abdc222392e68f3b8ad323930d05a214a27c7e7a0f06c69126401/ruamel.yaml.clib-0.2.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1f118b707eece8cf84ecbc3e3ec94d9db879d85ed608f95870d39b2d2efa5dca", size = 740202, upload-time = "2025-09-22T19:51:04.673Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/ac/3c5c2b27a183f4fda8a57c82211721c016bcb689a4a175865f7646db9f94/ruamel.yaml.clib-0.2.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b30110b29484adc597df6bd92a37b90e63a8c152ca8136aad100a02f8ba6d1b6", size = 765196, upload-time = "2025-09-22T19:51:05.916Z" },
+ { url = "https://files.pythonhosted.org/packages/92/2e/06f56a71fd55021c993ed6e848c9b2e5e9cfce180a42179f0ddd28253f7c/ruamel.yaml.clib-0.2.14-cp313-cp313-win32.whl", hash = "sha256:f4e97a1cf0b7a30af9e1d9dad10a5671157b9acee790d9e26996391f49b965a2", size = 98635, upload-time = "2025-09-22T19:51:08.183Z" },
+ { url = "https://files.pythonhosted.org/packages/51/79/76aba16a1689b50528224b182f71097ece338e7a4ab55e84c2e73443b78a/ruamel.yaml.clib-0.2.14-cp313-cp313-win_amd64.whl", hash = "sha256:090782b5fb9d98df96509eecdbcaffd037d47389a89492320280d52f91330d78", size = 115238, upload-time = "2025-09-22T19:51:07.081Z" },
+ { url = "https://files.pythonhosted.org/packages/21/e2/a59ff65c26aaf21a24eb38df777cb9af5d87ba8fc8107c163c2da9d1e85e/ruamel.yaml.clib-0.2.14-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:7df6f6e9d0e33c7b1d435defb185095386c469109de723d514142632a7b9d07f", size = 271441, upload-time = "2025-09-23T14:24:16.498Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/fa/3234f913fe9a6525a7b97c6dad1f51e72b917e6872e051a5e2ffd8b16fbb/ruamel.yaml.clib-0.2.14-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:70eda7703b8126f5e52fcf276e6c0f40b0d314674f896fc58c47b0aef2b9ae83", size = 137970, upload-time = "2025-09-22T19:51:09.472Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/ec/4edbf17ac2c87fa0845dd366ef8d5852b96eb58fcd65fc1ecf5fe27b4641/ruamel.yaml.clib-0.2.14-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a0cb71ccc6ef9ce36eecb6272c81afdc2f565950cdcec33ae8e6cd8f7fc86f27", size = 739639, upload-time = "2025-09-22T19:51:10.566Z" },
+ { url = "https://files.pythonhosted.org/packages/15/18/b0e1fafe59051de9e79cdd431863b03593ecfa8341c110affad7c8121efc/ruamel.yaml.clib-0.2.14-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e7cb9ad1d525d40f7d87b6df7c0ff916a66bc52cb61b66ac1b2a16d0c1b07640", size = 764456, upload-time = "2025-09-22T19:51:11.736Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/cd/150fdb96b8fab27fe08d8a59fe67554568727981806e6bc2677a16081ec7/ruamel_yaml_clib-0.2.14-cp314-cp314-win32.whl", hash = "sha256:9b4104bf43ca0cd4e6f738cb86326a3b2f6eef00f417bd1e7efb7bdffe74c539", size = 102394, upload-time = "2025-11-14T21:57:36.703Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/e6/a3fa40084558c7e1dc9546385f22a93949c890a8b2e445b2ba43935f51da/ruamel_yaml_clib-0.2.14-cp314-cp314-win_amd64.whl", hash = "sha256:13997d7d354a9890ea1ec5937a219817464e5cc344805b37671562a401ca3008", size = 122673, upload-time = "2025-11-14T21:57:38.177Z" },
]
[[package]]
@@ -5039,15 +5177,15 @@ wheels = [
[[package]]
name = "starlette"
-version = "0.47.2"
+version = "0.49.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = "2025-07-20T17:31:58.522Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/de/1a/608df0b10b53b0beb96a37854ee05864d182ddd4b1156a22f1ad3860425a/starlette-0.49.3.tar.gz", hash = "sha256:1c14546f299b5901a1ea0e34410575bc33bbd741377a10484a54445588d00284", size = 2655031, upload-time = "2025-11-01T15:12:26.13Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/e0/021c772d6a662f43b63044ab481dc6ac7592447605b5b35a957785363122/starlette-0.49.3-py3-none-any.whl", hash = "sha256:b579b99715fdc2980cf88c8ec96d3bf1ce16f5a8051a7c2b84ef9b1cdecaea2f", size = 74340, upload-time = "2025-11-01T15:12:24.387Z" },
]
[[package]]