diff --git a/.github/workflows/conformance.yml b/.github/workflows/conformance.yml index 5eddb193f..2dd62a9c4 100644 --- a/.github/workflows/conformance.yml +++ b/.github/workflows/conformance.yml @@ -1,6 +1,11 @@ # API Conformance Tests # This workflow ensures that API changes maintain backward compatibility and don't break existing integrations # It runs schema validation and OpenAPI diff checks to catch breaking changes early +# +# The workflow handles both monolithic and split API specifications: +# - If split specs exist (stable/experimental/deprecated), the stable spec is used for comparison +# - If only the monolithic spec exists, it is used directly +# This allows for clean API organization while maintaining robust conformance testing name: API Conformance Tests @@ -11,11 +16,14 @@ on: branches: [ main ] pull_request: branches: [ main ] - types: [opened, synchronize, reopened] + types: [opened, synchronize, reopened, edited] paths: - - 'docs/static/llama-stack-spec.yaml' - - 'docs/static/llama-stack-spec.html' - - '.github/workflows/conformance.yml' # This workflow itself + - 'docs/static/llama-stack-spec.yaml' # Legacy monolithic spec + - 'docs/static/stable-llama-stack-spec.yaml' # Stable APIs spec + - 'docs/static/experimental-llama-stack-spec.yaml' # Experimental APIs spec + - 'docs/static/deprecated-llama-stack-spec.yaml' # Deprecated APIs spec + - 'docs/static/llama-stack-spec.html' # Legacy HTML spec + - '.github/workflows/conformance.yml' # This workflow itself concurrency: group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }} @@ -27,14 +35,31 @@ jobs: check-schema-compatibility: runs-on: ubuntu-latest steps: - # Using specific version 4.1.7 because 5.0.0 fails when trying to run this locally using `act` - # This ensures consistent behavior between local testing and CI - name: Checkout PR Code uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + with: + fetch-depth: 0 + # Check if we should skip conformance testing due to breaking changes + - name: Check if conformance test should be skipped + id: skip-check + run: | + PR_TITLE="${{ github.event.pull_request.title }}" + + # Skip if title contains "!:" indicating breaking change (like "feat!:") + if [[ "$PR_TITLE" == *"!:"* ]]; then + echo "skip=true" >> $GITHUB_OUTPUT + exit 0 + fi + + # Get all commits in this PR and check for BREAKING CHANGE footer + git log --format="%B" ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }} | \ + grep -q "BREAKING CHANGE:" && echo "skip=true" >> $GITHUB_OUTPUT || echo "skip=false" >> $GITHUB_OUTPUT + shell: bash # Checkout the base branch to compare against (usually main) # This allows us to diff the current changes against the previous state - name: Checkout Base Branch + if: steps.skip-check.outputs.skip != 'true' uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: ref: ${{ github.event.pull_request.base.ref }} @@ -42,6 +67,7 @@ jobs: # Cache oasdiff to avoid checksum failures and speed up builds - name: Cache oasdiff + if: steps.skip-check.outputs.skip != 'true' id: cache-oasdiff uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 with: @@ -50,20 +76,68 @@ jobs: # Install oasdiff: https://github.com/oasdiff/oasdiff, a tool for detecting breaking changes in OpenAPI specs.
- name: Install oasdiff - if: steps.cache-oasdiff.outputs.cache-hit != 'true' + if: steps.skip-check.outputs.skip != 'true' && steps.cache-oasdiff.outputs.cache-hit != 'true' run: | curl -fsSL https://raw.githubusercontent.com/oasdiff/oasdiff/main/install.sh | sh cp /usr/local/bin/oasdiff ~/oasdiff # Setup cached oasdiff - name: Setup cached oasdiff - if: steps.cache-oasdiff.outputs.cache-hit == 'true' + if: steps.skip-check.outputs.skip != 'true' && steps.cache-oasdiff.outputs.cache-hit == 'true' run: | sudo cp ~/oasdiff /usr/local/bin/oasdiff sudo chmod +x /usr/local/bin/oasdiff + # Install yq for YAML processing + - name: Install yq + run: | + sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 + sudo chmod +x /usr/local/bin/yq + + # Verify API specs exist for conformance testing + - name: Check API Specs + run: | + echo "Checking for API specification files..." + + # Check current branch + if [ -f "docs/static/stable-llama-stack-spec.yaml" ]; then + echo "✓ Found stable API spec in current branch" + CURRENT_SPEC="docs/static/stable-llama-stack-spec.yaml" + elif [ -f "docs/static/llama-stack-spec.yaml" ]; then + echo "✓ Found monolithic API spec in current branch" + CURRENT_SPEC="docs/static/llama-stack-spec.yaml" + else + echo "❌ No API specs found in current branch" + exit 1 + fi + + # Check base branch + if [ -f "base/docs/static/stable-llama-stack-spec.yaml" ]; then + echo "✓ Found stable API spec in base branch" + BASE_SPEC="base/docs/static/stable-llama-stack-spec.yaml" + elif [ -f "base/docs/static/llama-stack-spec.yaml" ]; then + echo "✓ Found monolithic API spec in base branch" + BASE_SPEC="base/docs/static/llama-stack-spec.yaml" + else + echo "❌ No API specs found in base branch" + exit 1 + fi + + # Export for next step + echo "BASE_SPEC=${BASE_SPEC}" >> $GITHUB_ENV + echo "CURRENT_SPEC=${CURRENT_SPEC}" >> $GITHUB_ENV + + echo "Will compare: ${BASE_SPEC} -> ${CURRENT_SPEC}" + # Run oasdiff to detect breaking changes in the API specification # This step will fail if incompatible changes are detected, preventing breaking changes from being merged - name: Run OpenAPI Breaking Change Diff + if: steps.skip-check.outputs.skip != 'true' run: | - oasdiff breaking --fail-on ERR base/docs/static/llama-stack-spec.yaml docs/static/llama-stack-spec.yaml --match-path '^/v1/' + oasdiff breaking --fail-on ERR "$BASE_SPEC" "$CURRENT_SPEC" --match-path '^/v1/' + + # Report when test is skipped + - name: Report skip reason + if: steps.skip-check.outputs.skip == 'true' + run: | + echo "Skipping conformance check: breaking change indicated by PR title ('!:') or a BREAKING CHANGE commit footer" diff --git a/docs/docs/api-overview.md b/docs/docs/api-overview.md new file mode 100644 index 000000000..bb95f445b --- /dev/null +++ b/docs/docs/api-overview.md @@ -0,0 +1,49 @@ +# API Reference Overview + +The Llama Stack provides a comprehensive set of APIs organized by stability level to help you choose the right endpoints for your use case. + +## 🟢 Stable APIs + +**Production-ready APIs with backward compatibility guarantees.** + +These APIs are fully tested, documented, and stable. They follow semantic versioning principles and maintain backward compatibility within major versions. Recommended for production applications.
+ +[**Browse Stable APIs →**](./api/llama-stack-specification) + +**Key Features:** +- ✅ Backward compatibility guaranteed +- ✅ Comprehensive testing and validation +- ✅ Production-ready reliability +- ✅ Long-term support + +--- + +## 🟡 Experimental APIs + +**Preview APIs that may change before becoming stable.** + +These APIs include v1alpha and v1beta endpoints that are feature-complete but may undergo changes based on feedback. Great for exploring new capabilities and providing feedback. + +[**Browse Experimental APIs →**](./api-experimental/llama-stack-specification-experimental-apis) + +**Key Features:** +- 🧪 Latest features and capabilities +- 🧪 May change based on user feedback +- 🧪 Active development and iteration +- 🧪 Opportunity to influence final design + +--- + +## 🔴 Deprecated APIs + +**Legacy APIs for migration reference.** + +These APIs are deprecated and will be removed in future versions. They are provided for migration purposes and to help transition to newer, stable alternatives. + +[**Browse Deprecated APIs →**](./api-deprecated/llama-stack-specification-deprecated-apis) + +**Key Features:** +- ⚠️ Will be removed in future versions +- ⚠️ Migration guidance provided +- ⚠️ Use for compatibility during transition +- ⚠️ Not recommended for new projects diff --git a/docs/docs/providers/agents/index.mdx b/docs/docs/providers/agents/index.mdx index 5cd37776d..06eb104af 100644 --- a/docs/docs/providers/agents/index.mdx +++ b/docs/docs/providers/agents/index.mdx @@ -1,12 +1,7 @@ --- -description: "Agents API for creating and interacting with agentic systems. +description: "Agents - Main functionalities provided by this API: - - Create agents with specific instructions and ability to use tools. - - Interactions with agents are grouped into sessions (\"threads\"), and each interaction is called a \"turn\". - - Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details). - - Agents can be provided with various shields (see the Safety API for more details). - - Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details." + APIs for creating and interacting with agentic systems." sidebar_label: Agents title: Agents --- @@ -15,13 +10,8 @@ title: Agents ## Overview -Agents API for creating and interacting with agentic systems. +Agents - Main functionalities provided by this API: - - Create agents with specific instructions and ability to use tools. - - Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn". - - Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details). - - Agents can be provided with various shields (see the Safety API for more details). - - Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details. + APIs for creating and interacting with agentic systems. This section contains documentation for all available providers for the **agents** API. 
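The stability tiers described above, the split sidebars in the Docusaurus config below, and the generator's `stability_filter` all rest on the same rule: an endpoint's version prefix (`v1`, `v1alpha`, `v1beta`) plus its `deprecated` flag decide which spec it lands in. The following is a minimal, single-label sketch of that rule for illustration only; `Endpoint` and `classify` are hypothetical names, not code from this PR, and unlike this sketch the real generator lets the buckets overlap (a deprecated `v1alpha` endpoint appears in both the experimental and deprecated specs).

```python
from dataclasses import dataclass


@dataclass
class Endpoint:
    route: str             # e.g. "/v1/models" or "/v1alpha/post-training/jobs"
    deprecated: bool = False


def classify(ep: Endpoint) -> str:
    """Single-label approximation of the stability buckets used for the split specs."""
    level = ep.route.strip("/").split("/")[0]  # version prefix: "v1", "v1alpha", "v1beta", ...
    if ep.deprecated:
        return "deprecated"        # deprecated endpoints (any version) -> deprecated spec
    if level in ("v1alpha", "v1beta"):
        return "experimental"      # pre-release endpoints -> experimental spec
    if level == "v1":
        return "stable"            # active v1 endpoints -> stable spec
    return "unknown"


if __name__ == "__main__":
    for ep in (
        Endpoint("/v1/models"),
        Endpoint("/v1beta/datasets"),
        Endpoint("/v1/agents", deprecated=True),
    ):
        print(f"{ep.route} -> {classify(ep)}")
```

The conformance workflow only consumes the stable bucket: on each branch it prefers `stable-llama-stack-spec.yaml`, falls back to the monolithic `llama-stack-spec.yaml`, and diffs the pair with `oasdiff breaking --fail-on ERR "$BASE_SPEC" "$CURRENT_SPEC" --match-path '^/v1/'`.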
diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts index 937aa4ddf..9aa9c6672 100644 --- a/docs/docusaurus.config.ts +++ b/docs/docusaurus.config.ts @@ -55,10 +55,27 @@ const config: Config = { label: 'Docs', }, { - type: 'docSidebar', - sidebarId: 'apiSidebar', - position: 'left', + type: 'dropdown', label: 'API Reference', + position: 'left', + to: '/docs/api-overview', + items: [ + { + type: 'docSidebar', + sidebarId: 'stableApiSidebar', + label: '🟢 Stable APIs', + }, + { + type: 'docSidebar', + sidebarId: 'experimentalApiSidebar', + label: '🟡 Experimental APIs', + }, + { + type: 'docSidebar', + sidebarId: 'deprecatedApiSidebar', + label: '🔴 Deprecated APIs', + }, + ], }, { href: 'https://github.com/llamastack/llama-stack', @@ -83,7 +100,7 @@ const config: Config = { }, { label: 'API Reference', - to: '/docs/api/llama-stack-specification', + to: '/docs/api-overview', }, ], }, @@ -170,7 +187,7 @@ const config: Config = { id: "openapi", docsPluginId: "classic", config: { - llamastack: { + stable: { specPath: "static/llama-stack-spec.yaml", outputDir: "docs/api", downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/llama-stack-spec.yaml", @@ -179,6 +196,24 @@ const config: Config = { categoryLinkSource: "tag", }, } satisfies OpenApiPlugin.Options, + experimental: { + specPath: "static/experimental-llama-stack-spec.yaml", + outputDir: "docs/api-experimental", + downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/experimental-llama-stack-spec.yaml", + sidebarOptions: { + groupPathsBy: "tag", + categoryLinkSource: "tag", + }, + } satisfies OpenApiPlugin.Options, + deprecated: { + specPath: "static/deprecated-llama-stack-spec.yaml", + outputDir: "docs/api-deprecated", + downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/deprecated-llama-stack-spec.yaml", + sidebarOptions: { + groupPathsBy: "tag", + categoryLinkSource: "tag", + }, + } satisfies OpenApiPlugin.Options, } satisfies Plugin.PluginOptions, }, ], diff --git a/docs/openapi_generator/generate.py b/docs/openapi_generator/generate.py index 54031d839..ea0f62b00 100644 --- a/docs/openapi_generator/generate.py +++ b/docs/openapi_generator/generate.py @@ -34,40 +34,52 @@ def str_presenter(dumper, data): return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style) -def main(output_dir: str): - output_dir = Path(output_dir) - if not output_dir.exists(): - raise ValueError(f"Directory {output_dir} does not exist") +def generate_spec(output_dir: Path, stability_filter: str = None, main_spec: bool = False): + """Generate OpenAPI spec with optional stability filtering.""" - # Validate API protocols before generating spec - return_type_errors = validate_api() - if return_type_errors: - print("\nAPI Method Return Type Validation Errors:\n") - for error in return_type_errors: - print(error, file=sys.stderr) - sys.exit(1) - now = str(datetime.now()) - print( - "Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at " + now - ) - print("") + if stability_filter: + title_suffix = { + "stable": " - Stable APIs" if not main_spec else "", + "experimental": " - Experimental APIs", + "deprecated": " - Deprecated APIs" + }.get(stability_filter, f" - {stability_filter.title()} APIs") + + # Use main spec filename for stable when main_spec=True + if main_spec and stability_filter == "stable": + filename_prefix = "" + else: + filename_prefix = f"{stability_filter}-" + + description_suffix = { + "stable": "\n\n**✅ 
STABLE**: Production-ready APIs with backward compatibility guarantees.", + "experimental": "\n\n**🧪 EXPERIMENTAL**: Pre-release APIs (v1alpha, v1beta) that may change before becoming stable.", + "deprecated": "\n\n**⚠️ DEPRECATED**: Legacy APIs that may be removed in future versions. Use for migration reference only." + }.get(stability_filter, "") + else: + title_suffix = "" + filename_prefix = "" + description_suffix = "" spec = Specification( LlamaStack, Options( server=Server(url="http://any-hosted-llama-stack.com"), info=Info( - title="Llama Stack Specification", + title=f"Llama Stack Specification{title_suffix}", version=LLAMA_STACK_API_V1, - description="""This is the specification of the Llama Stack that provides + description=f"""This is the specification of the Llama Stack that provides a set of endpoints and their corresponding interfaces that are tailored to - best leverage Llama Models.""", + best leverage Llama Models.{description_suffix}""", ), include_standard_error_responses=True, + stability_filter=stability_filter, # Pass the filter to the generator ), ) - with open(output_dir / "llama-stack-spec.yaml", "w", encoding="utf-8") as fp: + yaml_filename = f"{filename_prefix}llama-stack-spec.yaml" + html_filename = f"{filename_prefix}llama-stack-spec.html" + + with open(output_dir / yaml_filename, "w", encoding="utf-8") as fp: y = yaml.YAML() y.default_flow_style = False y.block_seq_indent = 2 @@ -83,9 +95,36 @@ def main(output_dir: str): fp, ) - with open(output_dir / "llama-stack-spec.html", "w") as fp: + with open(output_dir / html_filename, "w") as fp: spec.write_html(fp, pretty_print=True) + print(f"Generated {yaml_filename} and {html_filename}") + +def main(output_dir: str): + output_dir = Path(output_dir) + if not output_dir.exists(): + raise ValueError(f"Directory {output_dir} does not exist") + + # Validate API protocols before generating spec + return_type_errors = validate_api() + if return_type_errors: + print("\nAPI Method Return Type Validation Errors:\n") + for error in return_type_errors: + print(error, file=sys.stderr) + sys.exit(1) + + now = str(datetime.now()) + print(f"Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at {now}") + print("") + + # Generate main spec as stable APIs (llama-stack-spec.yaml) + print("Generating main specification (stable APIs)...") + generate_spec(output_dir, "stable", main_spec=True) + + print("Generating other stability-filtered specifications...") + generate_spec(output_dir, "experimental") + generate_spec(output_dir, "deprecated") + if __name__ == "__main__": fire.Fire(main) diff --git a/docs/openapi_generator/pyopenapi/generator.py b/docs/openapi_generator/pyopenapi/generator.py index a38e02e7f..d3ad2201b 100644 --- a/docs/openapi_generator/pyopenapi/generator.py +++ b/docs/openapi_generator/pyopenapi/generator.py @@ -7,13 +7,14 @@ import hashlib import inspect import ipaddress +import os import types import typing from dataclasses import make_dataclass +from pathlib import Path from typing import Annotated, Any, Dict, get_args, get_origin, Set, Union from fastapi import UploadFile -from pydantic import BaseModel from llama_stack.apis.datatypes import Error from llama_stack.strong_typing.core import JsonType @@ -35,6 +36,7 @@ from llama_stack.strong_typing.schema import ( SchemaOptions, ) from llama_stack.strong_typing.serialization import json_dump_string, object_to_json +from pydantic import BaseModel from .operations import ( EndpointOperation, @@ -546,6 +548,84 @@ class Generator: return extra_tags + 
def _get_api_group_for_operation(self, op) -> str | None: + """ + Determine the API group for an operation based on its route path. + + Args: + op: The endpoint operation + + Returns: + The API group name derived from the route, or None if unable to determine + """ + if not hasattr(op, 'webmethod') or not op.webmethod or not hasattr(op.webmethod, 'route'): + return None + + route = op.webmethod.route + if not route or not route.startswith('/'): + return None + + # Extract API group from route path + # Examples: /v1/agents/list -> agents-api + # /v1/responses -> responses-api + # /v1/models -> models-api + path_parts = route.strip('/').split('/') + + if len(path_parts) < 2: + return None + + # Skip version prefix (v1, v1alpha, v1beta, etc.) + if path_parts[0].startswith('v1'): + if len(path_parts) < 2: + return None + api_segment = path_parts[1] + else: + api_segment = path_parts[0] + + # Convert to supplementary file naming convention + # agents -> agents-api, responses -> responses-api, etc. + return f"{api_segment}-api" + + def _load_supplemental_content(self, api_group: str | None) -> str: + """ + Load supplemental content for an API group based on stability level. + + Follows this resolution order: + 1. docs/supplementary/{stability}/{api_group}.md + 2. docs/supplementary/shared/{api_group}.md (fallback) + 3. Empty string if no files found + + Args: + api_group: The API group name (e.g., "agents-responses-api"), or None if no mapping exists + + Returns: + The supplemental content as markdown string, or empty string if not found + """ + if not api_group: + return "" + + base_path = Path(__file__).parent.parent.parent / "supplementary" + + # Try stability-specific content first if stability filter is set + if self.options.stability_filter: + stability_path = base_path / self.options.stability_filter / f"{api_group}.md" + if stability_path.exists(): + try: + return stability_path.read_text(encoding="utf-8") + except Exception as e: + print(f"Warning: Could not read stability-specific supplemental content from {stability_path}: {e}") + + # Fall back to shared content + shared_path = base_path / "shared" / f"{api_group}.md" + if shared_path.exists(): + try: + return shared_path.read_text(encoding="utf-8") + except Exception as e: + print(f"Warning: Could not read shared supplemental content from {shared_path}: {e}") + + # No supplemental content found + return "" + def _build_operation(self, op: EndpointOperation) -> Operation: if op.defining_class.__name__ in [ "SyntheticDataGeneration", @@ -797,10 +877,14 @@ class Generator: else: callbacks = None - description = "\n".join( + # Build base description from docstring + base_description = "\n".join( filter(None, [doc_string.short_description, doc_string.long_description]) ) + # Individual endpoints get clean descriptions only + description = base_description + return Operation( tags=[ getattr(op.defining_class, "API_NAMESPACE", op.defining_class.__name__) @@ -811,16 +895,121 @@ class Generator: requestBody=requestBody, responses=responses, callbacks=callbacks, - deprecated=True if "DEPRECATED" in op.func_name else None, + deprecated=getattr(op.webmethod, "deprecated", False) + or "DEPRECATED" in op.func_name, security=[] if op.public else None, ) + def _get_api_stability_priority(self, api_level: str) -> int: + """ + Return sorting priority for API stability levels. 
+ Lower numbers = higher priority (appear first) + + :param api_level: The API level (e.g., "v1", "v1beta", "v1alpha") + :return: Priority number for sorting + """ + stability_order = { + "v1": 0, # Stable - highest priority + "v1beta": 1, # Beta - medium priority + "v1alpha": 2, # Alpha - lowest priority + } + return stability_order.get(api_level, 999) # Unknown levels go last + def generate(self) -> Document: paths: Dict[str, PathItem] = {} endpoint_classes: Set[type] = set() - for op in get_endpoint_operations( - self.endpoint, use_examples=self.options.use_examples - ): + + # Collect all operations and filter by stability if specified + operations = list( + get_endpoint_operations( + self.endpoint, use_examples=self.options.use_examples + ) + ) + + # Filter operations by stability level if requested + if self.options.stability_filter: + filtered_operations = [] + for op in operations: + deprecated = ( + getattr(op.webmethod, "deprecated", False) + or "DEPRECATED" in op.func_name + ) + stability_level = op.webmethod.level + + if self.options.stability_filter == "stable": + # Include v1 non-deprecated endpoints + if stability_level == "v1" and not deprecated: + filtered_operations.append(op) + elif self.options.stability_filter == "experimental": + # Include v1alpha and v1beta endpoints (deprecated or not) + if stability_level in ["v1alpha", "v1beta"]: + filtered_operations.append(op) + elif self.options.stability_filter == "deprecated": + # Include only deprecated endpoints + if deprecated: + filtered_operations.append(op) + + operations = filtered_operations + print( + f"Filtered to {len(operations)} operations for stability level: {self.options.stability_filter}" + ) + + # Sort operations by multiple criteria for consistent ordering: + # 1. Stability level with deprecation handling (global priority): + # - Active stable (v1) comes first + # - Beta (v1beta) comes next + # - Alpha (v1alpha) comes next + # - Deprecated stable (v1 deprecated) comes last + # 2. Route path (group related endpoints within same stability level) + # 3. HTTP method (GET, POST, PUT, DELETE, PATCH) + # 4. 
Operation name (alphabetical) + def sort_key(op): + http_method_order = { + HTTPMethod.GET: 0, + HTTPMethod.POST: 1, + HTTPMethod.PUT: 2, + HTTPMethod.DELETE: 3, + HTTPMethod.PATCH: 4, + } + + # Enhanced stability priority for migration pattern support + deprecated = getattr(op.webmethod, "deprecated", False) + stability_priority = self._get_api_stability_priority(op.webmethod.level) + + # Deprecated versions should appear after everything else + # This ensures deprecated stable endpoints come last globally + if deprecated: + stability_priority += 10 # Push deprecated endpoints to the end + + return ( + stability_priority, # Global stability handling comes first + op.get_route( + op.webmethod + ), # Group by route path within stability level + http_method_order.get(op.http_method, 999), + op.func_name, + ) + + operations.sort(key=sort_key) + + # Debug output for migration pattern tracking + migration_routes = {} + for op in operations: + route_key = (op.get_route(op.webmethod), op.http_method) + if route_key not in migration_routes: + migration_routes[route_key] = [] + migration_routes[route_key].append( + (op.webmethod.level, getattr(op.webmethod, "deprecated", False)) + ) + + for route_key, versions in migration_routes.items(): + if len(versions) > 1: + print(f"Migration pattern detected for {route_key[1]} {route_key[0]}:") + for level, deprecated in versions: + status = "DEPRECATED" if deprecated else "ACTIVE" + print(f" - {level} ({status})") + + for op in operations: endpoint_classes.add(op.defining_class) operation = self._build_operation(op) @@ -851,10 +1040,22 @@ class Generator: doc_string = parse_type(cls) if hasattr(cls, "API_NAMESPACE") and cls.API_NAMESPACE != cls.__name__: continue + + # Add supplemental content to tag pages + api_group = f"{cls.__name__.lower()}-api" + supplemental_content = self._load_supplemental_content(api_group) + + tag_description = doc_string.long_description or "" + if supplemental_content: + if tag_description: + tag_description = f"{tag_description}\n\n{supplemental_content}" + else: + tag_description = supplemental_content + operation_tags.append( Tag( name=cls.__name__, - description=doc_string.long_description, + description=tag_description, displayName=doc_string.short_description, ) ) diff --git a/docs/openapi_generator/pyopenapi/options.py b/docs/openapi_generator/pyopenapi/options.py index edc861ad5..53855b5b6 100644 --- a/docs/openapi_generator/pyopenapi/options.py +++ b/docs/openapi_generator/pyopenapi/options.py @@ -54,6 +54,7 @@ class Options: property_description_fun: Optional[Callable[[type, str, str], str]] = None captions: Optional[Dict[str, str]] = None include_standard_error_responses: bool = True + stability_filter: Optional[str] = None default_captions: ClassVar[Dict[str, str]] = { "Operations": "Operations", diff --git a/docs/sidebars.ts b/docs/sidebars.ts index 01c1390c1..2724de05c 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -335,8 +335,10 @@ const sidebars: SidebarsConfig = { }, ], - // API Reference sidebar - use plugin-generated sidebar - apiSidebar: require('./docs/api/sidebar.ts').default, + // API Reference sidebars - use plugin-generated sidebars + stableApiSidebar: require('./docs/api/sidebar.ts').default, + experimentalApiSidebar: require('./docs/api-experimental/sidebar.ts').default, + deprecatedApiSidebar: require('./docs/api-deprecated/sidebar.ts').default, }; export default sidebars; diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css index 0e4d95b9b..7f642ccb6 100644 --- 
a/docs/src/css/custom.css +++ b/docs/src/css/custom.css @@ -189,3 +189,29 @@ button[class*="button"]:hover, .pagination-nav__link--prev:hover { background-color: #f3f4f6 !important; } + +/* Deprecated endpoint styling */ +.menu__list-item--deprecated .menu__link { + text-decoration: line-through !important; + opacity: 0.7; + font-style: italic; +} + +.menu__list-item--deprecated .menu__link:hover { + opacity: 0.9; +} + +/* Deprecated endpoint badges - slightly muted */ +.menu__list-item--deprecated.api-method > .menu__link::before { + opacity: 0.7; + border-style: dashed !important; +} + +/* Dark theme adjustments for deprecated endpoints */ +[data-theme='dark'] .menu__list-item--deprecated .menu__link { + opacity: 0.6; +} + +[data-theme='dark'] .menu__list-item--deprecated .menu__link:hover { + opacity: 0.8; +} diff --git a/docs/static/deprecated-llama-stack-spec.html b/docs/static/deprecated-llama-stack-spec.html new file mode 100644 index 000000000..21ba4a1de --- /dev/null +++ b/docs/static/deprecated-llama-stack-spec.html @@ -0,0 +1,6339 @@ + + + + + + + OpenAPI specification + + + + + + + + + + + + + diff --git a/docs/static/deprecated-llama-stack-spec.yaml b/docs/static/deprecated-llama-stack-spec.yaml new file mode 100644 index 000000000..ee8458c4e --- /dev/null +++ b/docs/static/deprecated-llama-stack-spec.yaml @@ -0,0 +1,4659 @@ +openapi: 3.1.0 +info: + title: >- + Llama Stack Specification - Deprecated APIs + version: v1 + description: >- + This is the specification of the Llama Stack that provides + a set of endpoints and their corresponding interfaces that are + tailored to + best leverage Llama Models. + + **⚠️ DEPRECATED**: Legacy APIs that may be removed in future versions. Use for + migration reference only. +servers: + - url: http://any-hosted-llama-stack.com +paths: + /v1/agents: + get: + responses: + '200': + description: A PaginatedResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: List all agents. + description: List all agents. + parameters: + - name: start_index + in: query + description: The index to start the pagination from. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of agents to return. + required: false + schema: + type: integer + deprecated: true + post: + responses: + '200': + description: >- + An AgentCreateResponse with the agent ID. + content: + application/json: + schema: + $ref: '#/components/schemas/AgentCreateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Create an agent with the given configuration. + description: >- + Create an agent with the given configuration. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAgentRequest' + required: true + deprecated: true + /v1/agents/{agent_id}: + get: + responses: + '200': + description: An Agent of the agent. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Agent' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Describe an agent by its ID. + description: Describe an agent by its ID. + parameters: + - name: agent_id + in: path + description: ID of the agent. + required: true + schema: + type: string + deprecated: true + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Delete an agent by its ID and its associated sessions and turns. + description: >- + Delete an agent by its ID and its associated sessions and turns. + parameters: + - name: agent_id + in: path + description: The ID of the agent to delete. + required: true + schema: + type: string + deprecated: true + /v1/agents/{agent_id}/session: + post: + responses: + '200': + description: An AgentSessionCreateResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/AgentSessionCreateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Create a new session for an agent. + description: Create a new session for an agent. + parameters: + - name: agent_id + in: path + description: >- + The ID of the agent to create the session for. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAgentSessionRequest' + required: true + deprecated: true + /v1/agents/{agent_id}/session/{session_id}: + get: + responses: + '200': + description: A Session. + content: + application/json: + schema: + $ref: '#/components/schemas/Session' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an agent session by its ID. + description: Retrieve an agent session by its ID. + parameters: + - name: session_id + in: path + description: The ID of the session to get. + required: true + schema: + type: string + - name: agent_id + in: path + description: >- + The ID of the agent to get the session for. + required: true + schema: + type: string + - name: turn_ids + in: query + description: >- + (Optional) List of turn IDs to filter the session by. + required: false + schema: + type: array + items: + type: string + deprecated: true + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Delete an agent session by its ID and its associated turns. + description: >- + Delete an agent session by its ID and its associated turns. 
+ parameters: + - name: session_id + in: path + description: The ID of the session to delete. + required: true + schema: + type: string + - name: agent_id + in: path + description: >- + The ID of the agent to delete the session for. + required: true + schema: + type: string + deprecated: true + /v1/agents/{agent_id}/session/{session_id}/turn: + post: + responses: + '200': + description: >- + If stream=False, returns a Turn object. If stream=True, returns an SSE + event stream of AgentTurnResponseStreamChunk. + content: + application/json: + schema: + $ref: '#/components/schemas/Turn' + text/event-stream: + schema: + $ref: '#/components/schemas/AgentTurnResponseStreamChunk' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Create a new turn for an agent. + description: Create a new turn for an agent. + parameters: + - name: agent_id + in: path + description: >- + The ID of the agent to create the turn for. + required: true + schema: + type: string + - name: session_id + in: path + description: >- + The ID of the session to create the turn for. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAgentTurnRequest' + required: true + deprecated: true + /v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}: + get: + responses: + '200': + description: A Turn. + content: + application/json: + schema: + $ref: '#/components/schemas/Turn' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an agent turn by its ID. + description: Retrieve an agent turn by its ID. + parameters: + - name: agent_id + in: path + description: The ID of the agent to get the turn for. + required: true + schema: + type: string + - name: session_id + in: path + description: >- + The ID of the session to get the turn for. + required: true + schema: + type: string + - name: turn_id + in: path + description: The ID of the turn to get. + required: true + schema: + type: string + deprecated: true + /v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume: + post: + responses: + '200': + description: >- + A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk + objects. + content: + application/json: + schema: + $ref: '#/components/schemas/Turn' + text/event-stream: + schema: + $ref: '#/components/schemas/AgentTurnResponseStreamChunk' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Resume an agent turn with executed tool call responses. + description: >- + Resume an agent turn with executed tool call responses. + + When a Turn has the status `awaiting_input` due to pending input from client + side tool calls, this endpoint can be used to submit the outputs from the + tool calls once they are ready. + parameters: + - name: agent_id + in: path + description: The ID of the agent to resume. 
+ required: true + schema: + type: string + - name: session_id + in: path + description: The ID of the session to resume. + required: true + schema: + type: string + - name: turn_id + in: path + description: The ID of the turn to resume. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ResumeAgentTurnRequest' + required: true + deprecated: true + /v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: + get: + responses: + '200': + description: An AgentStepResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/AgentStepResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an agent step by its ID. + description: Retrieve an agent step by its ID. + parameters: + - name: agent_id + in: path + description: The ID of the agent to get the step for. + required: true + schema: + type: string + - name: session_id + in: path + description: >- + The ID of the session to get the step for. + required: true + schema: + type: string + - name: turn_id + in: path + description: The ID of the turn to get the step for. + required: true + schema: + type: string + - name: step_id + in: path + description: The ID of the step to get. + required: true + schema: + type: string + deprecated: true + /v1/agents/{agent_id}/sessions: + get: + responses: + '200': + description: A PaginatedResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: List all session(s) of a given agent. + description: List all session(s) of a given agent. + parameters: + - name: agent_id + in: path + description: >- + The ID of the agent to list sessions for. + required: true + schema: + type: string + - name: start_index + in: query + description: The index to start the pagination from. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of sessions to return. + required: false + schema: + type: integer + deprecated: true + /v1/datasetio/append-rows/{dataset_id}: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - DatasetIO + summary: Append rows to a dataset. + description: Append rows to a dataset. + parameters: + - name: dataset_id + in: path + description: >- + The ID of the dataset to append the rows to. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AppendRowsRequest' + required: true + deprecated: true + /v1/datasetio/iterrows/{dataset_id}: + get: + responses: + '200': + description: A PaginatedResponse. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - DatasetIO + summary: >- + Get a paginated list of rows from a dataset. + description: >- + Get a paginated list of rows from a dataset. + + Uses offset-based pagination where: + + - start_index: The starting index (0-based). If None, starts from beginning. + + - limit: Number of items to return. If None or -1, returns all items. + + + The response includes: + + - data: List of items for the current page. + + - has_more: Whether there are more items available after this set. + parameters: + - name: dataset_id + in: path + description: >- + The ID of the dataset to get the rows from. + required: true + schema: + type: string + - name: start_index + in: query + description: >- + Index into dataset for the first row to get. Get all rows if None. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of rows to get. + required: false + schema: + type: integer + deprecated: true + /v1/datasets: + get: + responses: + '200': + description: A ListDatasetsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListDatasetsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: List all datasets. + description: List all datasets. + parameters: [] + deprecated: true + post: + responses: + '200': + description: A Dataset. + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: Register a new dataset. + description: Register a new dataset. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterDatasetRequest' + required: true + deprecated: true + /v1/datasets/{dataset_id}: + get: + responses: + '200': + description: A Dataset. + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: Get a dataset by its ID. + description: Get a dataset by its ID. + parameters: + - name: dataset_id + in: path + description: The ID of the dataset to get. + required: true + schema: + type: string + deprecated: true + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: Unregister a dataset by its ID. + description: Unregister a dataset by its ID. 
+ parameters: + - name: dataset_id + in: path + description: The ID of the dataset to unregister. + required: true + schema: + type: string + deprecated: true + /v1/eval/benchmarks: + get: + responses: + '200': + description: A ListBenchmarksResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListBenchmarksResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: List all benchmarks. + description: List all benchmarks. + parameters: [] + deprecated: true + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: Register a benchmark. + description: Register a benchmark. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterBenchmarkRequest' + required: true + deprecated: true + /v1/eval/benchmarks/{benchmark_id}: + get: + responses: + '200': + description: A Benchmark. + content: + application/json: + schema: + $ref: '#/components/schemas/Benchmark' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: Get a benchmark by its ID. + description: Get a benchmark by its ID. + parameters: + - name: benchmark_id + in: path + description: The ID of the benchmark to get. + required: true + schema: + type: string + deprecated: true + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: Unregister a benchmark. + description: Unregister a benchmark. + parameters: + - name: benchmark_id + in: path + description: The ID of the benchmark to unregister. + required: true + schema: + type: string + deprecated: true + /v1/eval/benchmarks/{benchmark_id}/evaluations: + post: + responses: + '200': + description: >- + EvaluateResponse object containing generations and scores. + content: + application/json: + schema: + $ref: '#/components/schemas/EvaluateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Evaluate a list of rows on a benchmark. + description: Evaluate a list of rows on a benchmark. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. 
+ required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/EvaluateRowsRequest' + required: true + deprecated: true + /v1/eval/benchmarks/{benchmark_id}/jobs: + post: + responses: + '200': + description: >- + The job that was created to run the evaluation. + content: + application/json: + schema: + $ref: '#/components/schemas/Job' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Run an evaluation on a benchmark. + description: Run an evaluation on a benchmark. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RunEvalRequest' + required: true + deprecated: true + /v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}: + get: + responses: + '200': + description: The status of the evaluation job. + content: + application/json: + schema: + $ref: '#/components/schemas/Job' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Get the status of a job. + description: Get the status of a job. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + - name: job_id + in: path + description: The ID of the job to get the status of. + required: true + schema: + type: string + deprecated: true + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Cancel a job. + description: Cancel a job. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + - name: job_id + in: path + description: The ID of the job to cancel. + required: true + schema: + type: string + deprecated: true + /v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result: + get: + responses: + '200': + description: The result of the job. + content: + application/json: + schema: + $ref: '#/components/schemas/EvaluateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Get the result of a job. + description: Get the result of a job. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + - name: job_id + in: path + description: The ID of the job to get the result of. + required: true + schema: + type: string + deprecated: true + /v1/post-training/job/artifacts: + get: + responses: + '200': + description: A PostTrainingJobArtifactsResponse. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJobArtifactsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Get the artifacts of a training job. + description: Get the artifacts of a training job. + parameters: + - name: job_uuid + in: query + description: >- + The UUID of the job to get the artifacts of. + required: true + schema: + type: string + deprecated: true + /v1/post-training/job/cancel: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Cancel a training job. + description: Cancel a training job. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CancelTrainingJobRequest' + required: true + deprecated: true + /v1/post-training/job/status: + get: + responses: + '200': + description: A PostTrainingJobStatusResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJobStatusResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Get the status of a training job. + description: Get the status of a training job. + parameters: + - name: job_uuid + in: query + description: >- + The UUID of the job to get the status of. + required: true + schema: + type: string + deprecated: true + /v1/post-training/jobs: + get: + responses: + '200': + description: A ListPostTrainingJobsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListPostTrainingJobsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Get all training jobs. + description: Get all training jobs. + parameters: [] + deprecated: true + /v1/post-training/preference-optimize: + post: + responses: + '200': + description: A PostTrainingJob. + content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJob' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Run preference optimization of a model. + description: Run preference optimization of a model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/PreferenceOptimizeRequest' + required: true + deprecated: true + /v1/post-training/supervised-fine-tune: + post: + responses: + '200': + description: A PostTrainingJob. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJob' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Run supervised fine-tuning of a model. + description: Run supervised fine-tuning of a model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SupervisedFineTuneRequest' + required: true + deprecated: true + /v1/telemetry/metrics/{metric_name}: + post: + responses: + '200': + description: A QueryMetricsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryMetricsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Query metrics. + description: Query metrics. + parameters: + - name: metric_name + in: path + description: The name of the metric to query. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryMetricsRequest' + required: true + deprecated: true + /v1/telemetry/spans: + post: + responses: + '200': + description: A QuerySpansResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpansResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Query spans. + description: Query spans. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpansRequest' + required: true + deprecated: true + /v1/telemetry/spans/export: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Save spans to a dataset. + description: Save spans to a dataset. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SaveSpansToDatasetRequest' + required: true + deprecated: true + /v1/telemetry/spans/{span_id}/tree: + post: + responses: + '200': + description: A QuerySpanTreeResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpanTreeResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Get a span tree by its ID. + description: Get a span tree by its ID. + parameters: + - name: span_id + in: path + description: The ID of the span to get the tree from. 
+ required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/GetSpanTreeRequest' + required: true + deprecated: true + /v1/telemetry/traces: + post: + responses: + '200': + description: A QueryTracesResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryTracesResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Query traces. + description: Query traces. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryTracesRequest' + required: true + deprecated: true + /v1/telemetry/traces/{trace_id}: + get: + responses: + '200': + description: A Trace. + content: + application/json: + schema: + $ref: '#/components/schemas/Trace' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Get a trace by its ID. + description: Get a trace by its ID. + parameters: + - name: trace_id + in: path + description: The ID of the trace to get. + required: true + schema: + type: string + deprecated: true + /v1/telemetry/traces/{trace_id}/spans/{span_id}: + get: + responses: + '200': + description: A Span. + content: + application/json: + schema: + $ref: '#/components/schemas/Span' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Get a span by its ID. + description: Get a span by its ID. + parameters: + - name: trace_id + in: path + description: >- + The ID of the trace to get the span from. + required: true + schema: + type: string + - name: span_id + in: path + description: The ID of the span to get. + required: true + schema: + type: string + deprecated: true +jsonSchemaDialect: >- + https://json-schema.org/draft/2020-12/schema +components: + schemas: + Error: + type: object + properties: + status: + type: integer + description: HTTP status code + title: + type: string + description: >- + Error title, a short summary of the error which is invariant for an error + type + detail: + type: string + description: >- + Error detail, a longer human-readable description of the error + instance: + type: string + description: >- + (Optional) A URL which can be used to retrieve more information about + the specific occurrence of the error + additionalProperties: false + required: + - status + - title + - detail + title: Error + description: >- + Error response from the API. Roughly follows RFC 7807. 
+ PaginatedResponse: + type: object + properties: + data: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The list of items for the current page + has_more: + type: boolean + description: >- + Whether there are more items available after this set + url: + type: string + description: The URL for accessing this list + additionalProperties: false + required: + - data + - has_more + title: PaginatedResponse + description: >- + A generic paginated response that follows a simple format. + AgentConfig: + type: object + properties: + sampling_params: + $ref: '#/components/schemas/SamplingParams' + input_shields: + type: array + items: + type: string + output_shields: + type: array + items: + type: string + toolgroups: + type: array + items: + $ref: '#/components/schemas/AgentTool' + client_tools: + type: array + items: + $ref: '#/components/schemas/ToolDef' + tool_choice: + type: string + enum: + - auto + - required + - none + title: ToolChoice + description: >- + Whether tool use is required or automatic. This is a hint to the model + which may not be followed. It depends on the Instruction Following capabilities + of the model. + deprecated: true + tool_prompt_format: + type: string + enum: + - json + - function_tag + - python_list + title: ToolPromptFormat + description: >- + Prompt format for calling custom / zero shot tools. + deprecated: true + tool_config: + $ref: '#/components/schemas/ToolConfig' + max_infer_iters: + type: integer + default: 10 + model: + type: string + description: >- + The model identifier to use for the agent + instructions: + type: string + description: The system instructions for the agent + name: + type: string + description: >- + Optional name for the agent, used in telemetry and identification + enable_session_persistence: + type: boolean + default: false + description: >- + Optional flag indicating whether session data has to be persisted + response_format: + $ref: '#/components/schemas/ResponseFormat' + description: Optional response format configuration + additionalProperties: false + required: + - model + - instructions + title: AgentConfig + description: Configuration for an agent. + AgentTool: + oneOf: + - type: string + - type: object + properties: + name: + type: string + args: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - name + - args + title: AgentToolGroupWithArgs + GrammarResponseFormat: + type: object + properties: + type: + type: string + enum: + - json_schema + - grammar + description: >- + Must be "grammar" to identify this format type + const: grammar + default: grammar + bnf: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The BNF grammar specification the response should conform to + additionalProperties: false + required: + - type + - bnf + title: GrammarResponseFormat + description: >- + Configuration for grammar-guided response generation. 
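+    # Illustrative only: a minimal AgentConfig document satisfying the schema above.
+    # Only `model` and `instructions` are required; the model identifier below is a
+    # placeholder, and the remaining fields show their documented defaults.
+    #   model: meta-llama/Llama-3.1-8B-Instruct
+    #   instructions: You are a helpful assistant.
+    #   max_infer_iters: 10
+    #   enable_session_persistence: false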
+ GreedySamplingStrategy: + type: object + properties: + type: + type: string + const: greedy + default: greedy + description: >- + Must be "greedy" to identify this sampling strategy + additionalProperties: false + required: + - type + title: GreedySamplingStrategy + description: >- + Greedy sampling strategy that selects the highest probability token at each + step. + JsonSchemaResponseFormat: + type: object + properties: + type: + type: string + enum: + - json_schema + - grammar + description: >- + Must be "json_schema" to identify this format type + const: json_schema + default: json_schema + json_schema: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The JSON schema the response should conform to. In a Python SDK, this + is often a `pydantic` model. + additionalProperties: false + required: + - type + - json_schema + title: JsonSchemaResponseFormat + description: >- + Configuration for JSON schema-guided response generation. + ResponseFormat: + oneOf: + - $ref: '#/components/schemas/JsonSchemaResponseFormat' + - $ref: '#/components/schemas/GrammarResponseFormat' + discriminator: + propertyName: type + mapping: + json_schema: '#/components/schemas/JsonSchemaResponseFormat' + grammar: '#/components/schemas/GrammarResponseFormat' + SamplingParams: + type: object + properties: + strategy: + oneOf: + - $ref: '#/components/schemas/GreedySamplingStrategy' + - $ref: '#/components/schemas/TopPSamplingStrategy' + - $ref: '#/components/schemas/TopKSamplingStrategy' + discriminator: + propertyName: type + mapping: + greedy: '#/components/schemas/GreedySamplingStrategy' + top_p: '#/components/schemas/TopPSamplingStrategy' + top_k: '#/components/schemas/TopKSamplingStrategy' + description: The sampling strategy. + max_tokens: + type: integer + default: 0 + description: >- + The maximum number of tokens that can be generated in the completion. + The token count of your prompt plus max_tokens cannot exceed the model's + context length. + repetition_penalty: + type: number + default: 1.0 + description: >- + Number between -2.0 and 2.0. Positive values penalize new tokens based + on whether they appear in the text so far, increasing the model's likelihood + to talk about new topics. + stop: + type: array + items: + type: string + description: >- + Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. + additionalProperties: false + required: + - strategy + title: SamplingParams + description: Sampling parameters. + ToolConfig: + type: object + properties: + tool_choice: + oneOf: + - type: string + enum: + - auto + - required + - none + title: ToolChoice + description: >- + Whether tool use is required or automatic. This is a hint to the model + which may not be followed. It depends on the Instruction Following + capabilities of the model. + - type: string + default: auto + description: >- + (Optional) Whether tool use is automatic, required, or none. Can also + specify a tool name to use a specific tool. Defaults to ToolChoice.auto. + tool_prompt_format: + type: string + enum: + - json + - function_tag + - python_list + description: >- + (Optional) Instructs the model how to format tool calls. By default, Llama + Stack will attempt to use a format that is best adapted to the model. + - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. 
+ - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python + syntax -- a list of function calls. + system_message_behavior: + type: string + enum: + - append + - replace + description: >- + (Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: + Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: + Replaces the default system prompt with the provided system message. The + system message can include the string '{{function_definitions}}' to indicate + where the function definitions should be inserted. + default: append + additionalProperties: false + title: ToolConfig + description: Configuration for tool use. + ToolDef: + type: object + properties: + name: + type: string + description: Name of the tool + description: + type: string + description: >- + (Optional) Human-readable description of what the tool does + parameters: + type: array + items: + $ref: '#/components/schemas/ToolParameter' + description: >- + (Optional) List of parameters this tool accepts + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata about the tool + additionalProperties: false + required: + - name + title: ToolDef + description: >- + Tool definition used in runtime contexts. + ToolParameter: + type: object + properties: + name: + type: string + description: Name of the parameter + parameter_type: + type: string + description: >- + Type of the parameter (e.g., string, integer) + description: + type: string + description: >- + Human-readable description of what the parameter does + required: + type: boolean + default: true + description: >- + Whether this parameter is required for tool invocation + items: + type: object + description: >- + Type of the elements when parameter_type is array + title: + type: string + description: (Optional) Title of the parameter + default: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Default value for the parameter if not provided + additionalProperties: false + required: + - name + - parameter_type + - description + - required + title: ToolParameter + description: Parameter definition for a tool. + TopKSamplingStrategy: + type: object + properties: + type: + type: string + const: top_k + default: top_k + description: >- + Must be "top_k" to identify this sampling strategy + top_k: + type: integer + description: >- + Number of top tokens to consider for sampling. Must be at least 1 + additionalProperties: false + required: + - type + - top_k + title: TopKSamplingStrategy + description: >- + Top-k sampling strategy that restricts sampling to the k most likely tokens. + TopPSamplingStrategy: + type: object + properties: + type: + type: string + const: top_p + default: top_p + description: >- + Must be "top_p" to identify this sampling strategy + temperature: + type: number + description: >- + Controls randomness in sampling. Higher values increase randomness + top_p: + type: number + default: 0.95 + description: >- + Cumulative probability threshold for nucleus sampling. 
Defaults to 0.95 + additionalProperties: false + required: + - type + title: TopPSamplingStrategy + description: >- + Top-p (nucleus) sampling strategy that samples from the smallest set of tokens + with cumulative probability >= p. + CreateAgentRequest: + type: object + properties: + agent_config: + $ref: '#/components/schemas/AgentConfig' + description: The configuration for the agent. + additionalProperties: false + required: + - agent_config + title: CreateAgentRequest + AgentCreateResponse: + type: object + properties: + agent_id: + type: string + description: Unique identifier for the created agent + additionalProperties: false + required: + - agent_id + title: AgentCreateResponse + description: >- + Response returned when creating a new agent. + Agent: + type: object + properties: + agent_id: + type: string + description: Unique identifier for the agent + agent_config: + $ref: '#/components/schemas/AgentConfig' + description: Configuration settings for the agent + created_at: + type: string + format: date-time + description: Timestamp when the agent was created + additionalProperties: false + required: + - agent_id + - agent_config + - created_at + title: Agent + description: >- + An agent instance with configuration and metadata. + CreateAgentSessionRequest: + type: object + properties: + session_name: + type: string + description: The name of the session to create. + additionalProperties: false + required: + - session_name + title: CreateAgentSessionRequest + AgentSessionCreateResponse: + type: object + properties: + session_id: + type: string + description: >- + Unique identifier for the created session + additionalProperties: false + required: + - session_id + title: AgentSessionCreateResponse + description: >- + Response returned when creating a new agent session. + CompletionMessage: + type: object + properties: + role: + type: string + const: assistant + default: assistant + description: >- + Must be "assistant" to identify this as the model's response + content: + $ref: '#/components/schemas/InterleavedContent' + description: The content of the model's response + stop_reason: + type: string + enum: + - end_of_turn + - end_of_message + - out_of_tokens + description: >- + Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: + The model finished generating the entire response. - `StopReason.end_of_message`: + The model finished generating but generated a partial response -- usually, + a tool call. The user may call the tool and continue the conversation + with the tool's response. - `StopReason.out_of_tokens`: The model ran + out of token budget. + tool_calls: + type: array + items: + $ref: '#/components/schemas/ToolCall' + description: >- + List of tool calls. Each tool call is a ToolCall object. + additionalProperties: false + required: + - role + - content + - stop_reason + title: CompletionMessage + description: >- + A message containing the model's (assistant) response in a chat conversation. + ImageContentItem: + type: object + properties: + type: + type: string + const: image + default: image + description: >- + Discriminator type of the content item. Always "image" + image: + type: object + properties: + url: + $ref: '#/components/schemas/URL' + description: >- + A URL of the image or data URL in the format of data:image/{type};base64,{data}. + Note that URL could have length limits. 
+ data: + type: string + contentEncoding: base64 + description: base64 encoded image data as string + additionalProperties: false + description: >- + Image as a base64 encoded string or an URL + additionalProperties: false + required: + - type + - image + title: ImageContentItem + description: A image content item + InferenceStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: inference + default: inference + model_response: + $ref: '#/components/schemas/CompletionMessage' + description: The response from the LLM. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + - model_response + title: InferenceStep + description: An inference step in an agent turn. + InterleavedContent: + oneOf: + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + InterleavedContentItem: + oneOf: + - $ref: '#/components/schemas/ImageContentItem' + - $ref: '#/components/schemas/TextContentItem' + discriminator: + propertyName: type + mapping: + image: '#/components/schemas/ImageContentItem' + text: '#/components/schemas/TextContentItem' + MemoryRetrievalStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: memory_retrieval + default: memory_retrieval + vector_db_ids: + type: string + description: >- + The IDs of the vector databases to retrieve context from. + inserted_context: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The context retrieved from the vector databases. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + - vector_db_ids + - inserted_context + title: MemoryRetrievalStep + description: >- + A memory retrieval step in an agent turn. + SafetyViolation: + type: object + properties: + violation_level: + $ref: '#/components/schemas/ViolationLevel' + description: Severity level of the violation + user_message: + type: string + description: >- + (Optional) Message to convey to the user about the violation + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Additional metadata including specific violation codes for debugging and + telemetry + additionalProperties: false + required: + - violation_level + - metadata + title: SafetyViolation + description: >- + Details of a safety violation detected by content moderation. 
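+    # Illustrative only: InterleavedContent accepts a bare string, a single content
+    # item, or a list of items. All values below are placeholders.
+    #   content: "What is in this image?"                        # plain string
+    #   content: { type: text, text: "What is in this image?" }  # single item
+    #   content:                                                 # list of items
+    #     - type: text
+    #       text: "Describe this picture"
+    #     - type: image
+    #       image: { url: { uri: "https://example.com/cat.png" } }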
+ Session: + type: object + properties: + session_id: + type: string + description: >- + Unique identifier for the conversation session + session_name: + type: string + description: Human-readable name for the session + turns: + type: array + items: + $ref: '#/components/schemas/Turn' + description: >- + List of all turns that have occurred in this session + started_at: + type: string + format: date-time + description: Timestamp when the session was created + additionalProperties: false + required: + - session_id + - session_name + - turns + - started_at + title: Session + description: >- + A single session of an interaction with an Agentic System. + ShieldCallStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: shield_call + default: shield_call + violation: + $ref: '#/components/schemas/SafetyViolation' + description: The violation from the shield call. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + title: ShieldCallStep + description: A shield call step in an agent turn. + TextContentItem: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Discriminator type of the content item. Always "text" + text: + type: string + description: Text content + additionalProperties: false + required: + - type + - text + title: TextContentItem + description: A text content item + ToolCall: + type: object + properties: + call_id: + type: string + tool_name: + oneOf: + - type: string + enum: + - brave_search + - wolfram_alpha + - photogen + - code_interpreter + title: BuiltinTool + - type: string + arguments: + oneOf: + - type: string + - type: object + additionalProperties: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + - type: array + items: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + - type: object + additionalProperties: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + arguments_json: + type: string + additionalProperties: false + required: + - call_id + - tool_name + - arguments + title: ToolCall + ToolExecutionStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: tool_execution + default: tool_execution + tool_calls: + type: array + items: + $ref: '#/components/schemas/ToolCall' + description: The tool calls to execute. + tool_responses: + type: array + items: + $ref: '#/components/schemas/ToolResponse' + description: The tool responses from the tool calls. 
+ additionalProperties: false + required: + - turn_id + - step_id + - step_type + - tool_calls + - tool_responses + title: ToolExecutionStep + description: A tool execution step in an agent turn. + ToolResponse: + type: object + properties: + call_id: + type: string + description: >- + Unique identifier for the tool call this response is for + tool_name: + oneOf: + - type: string + enum: + - brave_search + - wolfram_alpha + - photogen + - code_interpreter + title: BuiltinTool + - type: string + description: Name of the tool that was invoked + content: + $ref: '#/components/schemas/InterleavedContent' + description: The response content from the tool + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata about the tool response + additionalProperties: false + required: + - call_id + - tool_name + - content + title: ToolResponse + description: Response from a tool invocation. + ToolResponseMessage: + type: object + properties: + role: + type: string + const: tool + default: tool + description: >- + Must be "tool" to identify this as a tool response + call_id: + type: string + description: >- + Unique identifier for the tool call this response is for + content: + $ref: '#/components/schemas/InterleavedContent' + description: The response content from the tool + additionalProperties: false + required: + - role + - call_id + - content + title: ToolResponseMessage + description: >- + A message representing the result of a tool invocation. + Turn: + type: object + properties: + turn_id: + type: string + description: >- + Unique identifier for the turn within a session + session_id: + type: string + description: >- + Unique identifier for the conversation session + input_messages: + type: array + items: + oneOf: + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/ToolResponseMessage' + description: >- + List of messages that initiated this turn + steps: + type: array + items: + oneOf: + - $ref: '#/components/schemas/InferenceStep' + - $ref: '#/components/schemas/ToolExecutionStep' + - $ref: '#/components/schemas/ShieldCallStep' + - $ref: '#/components/schemas/MemoryRetrievalStep' + discriminator: + propertyName: step_type + mapping: + inference: '#/components/schemas/InferenceStep' + tool_execution: '#/components/schemas/ToolExecutionStep' + shield_call: '#/components/schemas/ShieldCallStep' + memory_retrieval: '#/components/schemas/MemoryRetrievalStep' + description: >- + Ordered list of processing steps executed during this turn + output_message: + $ref: '#/components/schemas/CompletionMessage' + description: >- + The model's generated response containing content and metadata + output_attachments: + type: array + items: + type: object + properties: + content: + oneOf: + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + - $ref: '#/components/schemas/URL' + description: The content of the attachment. + mime_type: + type: string + description: The MIME type of the attachment. + additionalProperties: false + required: + - content + - mime_type + title: Attachment + description: An attachment to an agent turn. 
+ description: >- + (Optional) Files or media attached to the agent's response + started_at: + type: string + format: date-time + description: Timestamp when the turn began + completed_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the turn finished, if completed + additionalProperties: false + required: + - turn_id + - session_id + - input_messages + - steps + - output_message + - started_at + title: Turn + description: >- + A single turn in an interaction with an Agentic System. + URL: + type: object + properties: + uri: + type: string + description: The URL string pointing to the resource + additionalProperties: false + required: + - uri + title: URL + description: A URL reference to external content. + UserMessage: + type: object + properties: + role: + type: string + const: user + default: user + description: >- + Must be "user" to identify this as a user message + content: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the message, which can include text and other media + context: + $ref: '#/components/schemas/InterleavedContent' + description: >- + (Optional) This field is used internally by Llama Stack to pass RAG context. + This field may be removed in the API in the future. + additionalProperties: false + required: + - role + - content + title: UserMessage + description: >- + A message from the user in a chat conversation. + ViolationLevel: + type: string + enum: + - info + - warn + - error + title: ViolationLevel + description: Severity level of a safety violation. + CreateAgentTurnRequest: + type: object + properties: + messages: + type: array + items: + oneOf: + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/ToolResponseMessage' + description: List of messages to start the turn with. + stream: + type: boolean + description: >- + (Optional) If True, generate an SSE event stream of the response. Defaults + to False. + documents: + type: array + items: + type: object + properties: + content: + oneOf: + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + - $ref: '#/components/schemas/URL' + description: The content of the document. + mime_type: + type: string + description: The MIME type of the document. + additionalProperties: false + required: + - content + - mime_type + title: Document + description: A document to be used by an agent. + description: >- + (Optional) List of documents to create the turn with. + toolgroups: + type: array + items: + $ref: '#/components/schemas/AgentTool' + description: >- + (Optional) List of toolgroups to create the turn with, will be used in + addition to the agent's config toolgroups for the request. + tool_config: + $ref: '#/components/schemas/ToolConfig' + description: >- + (Optional) The tool configuration to create the turn with, will be used + to override the agent's tool_config. 
+ additionalProperties: false + required: + - messages + title: CreateAgentTurnRequest + AgentTurnResponseEvent: + type: object + properties: + payload: + oneOf: + - $ref: '#/components/schemas/AgentTurnResponseStepStartPayload' + - $ref: '#/components/schemas/AgentTurnResponseStepProgressPayload' + - $ref: '#/components/schemas/AgentTurnResponseStepCompletePayload' + - $ref: '#/components/schemas/AgentTurnResponseTurnStartPayload' + - $ref: '#/components/schemas/AgentTurnResponseTurnCompletePayload' + - $ref: '#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload' + discriminator: + propertyName: event_type + mapping: + step_start: '#/components/schemas/AgentTurnResponseStepStartPayload' + step_progress: '#/components/schemas/AgentTurnResponseStepProgressPayload' + step_complete: '#/components/schemas/AgentTurnResponseStepCompletePayload' + turn_start: '#/components/schemas/AgentTurnResponseTurnStartPayload' + turn_complete: '#/components/schemas/AgentTurnResponseTurnCompletePayload' + turn_awaiting_input: '#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload' + description: >- + Event-specific payload containing event data + additionalProperties: false + required: + - payload + title: AgentTurnResponseEvent + description: >- + An event in an agent turn response stream. + AgentTurnResponseStepCompletePayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: step_complete + default: step_complete + description: Type of event being reported + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + description: Type of step being executed + step_id: + type: string + description: >- + Unique identifier for the step within a turn + step_details: + oneOf: + - $ref: '#/components/schemas/InferenceStep' + - $ref: '#/components/schemas/ToolExecutionStep' + - $ref: '#/components/schemas/ShieldCallStep' + - $ref: '#/components/schemas/MemoryRetrievalStep' + discriminator: + propertyName: step_type + mapping: + inference: '#/components/schemas/InferenceStep' + tool_execution: '#/components/schemas/ToolExecutionStep' + shield_call: '#/components/schemas/ShieldCallStep' + memory_retrieval: '#/components/schemas/MemoryRetrievalStep' + description: Complete details of the executed step + additionalProperties: false + required: + - event_type + - step_type + - step_id + - step_details + title: AgentTurnResponseStepCompletePayload + description: >- + Payload for step completion events in agent turn responses. 
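+    # Illustrative only: the `event_type` discriminator selects the payload variant.
+    # A hypothetical step_complete event for an inference step might look like
+    # (ids and content are placeholders):
+    #   payload:
+    #     event_type: step_complete
+    #     step_type: inference
+    #     step_id: step_1
+    #     step_details:
+    #       step_type: inference
+    #       turn_id: turn_1
+    #       step_id: step_1
+    #       model_response:
+    #         role: assistant
+    #         content: "Paris is the capital of France."
+    #         stop_reason: end_of_turn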
+ AgentTurnResponseStepProgressPayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: step_progress + default: step_progress + description: Type of event being reported + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + description: Type of step being executed + step_id: + type: string + description: >- + Unique identifier for the step within a turn + delta: + oneOf: + - $ref: '#/components/schemas/TextDelta' + - $ref: '#/components/schemas/ImageDelta' + - $ref: '#/components/schemas/ToolCallDelta' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/TextDelta' + image: '#/components/schemas/ImageDelta' + tool_call: '#/components/schemas/ToolCallDelta' + description: >- + Incremental content changes during step execution + additionalProperties: false + required: + - event_type + - step_type + - step_id + - delta + title: AgentTurnResponseStepProgressPayload + description: >- + Payload for step progress events in agent turn responses. + AgentTurnResponseStepStartPayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: step_start + default: step_start + description: Type of event being reported + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + description: Type of step being executed + step_id: + type: string + description: >- + Unique identifier for the step within a turn + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata for the step + additionalProperties: false + required: + - event_type + - step_type + - step_id + title: AgentTurnResponseStepStartPayload + description: >- + Payload for step start events in agent turn responses. + AgentTurnResponseStreamChunk: + type: object + properties: + event: + $ref: '#/components/schemas/AgentTurnResponseEvent' + description: >- + Individual event in the agent turn response stream + additionalProperties: false + required: + - event + title: AgentTurnResponseStreamChunk + description: Streamed agent turn completion response. + "AgentTurnResponseTurnAwaitingInputPayload": + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: turn_awaiting_input + default: turn_awaiting_input + description: Type of event being reported + turn: + $ref: '#/components/schemas/Turn' + description: >- + Turn data when waiting for external tool responses + additionalProperties: false + required: + - event_type + - turn + title: >- + AgentTurnResponseTurnAwaitingInputPayload + description: >- + Payload for turn awaiting input events in agent turn responses. 
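+    # Illustrative only: a hypothetical streamed chunk carrying an incremental text
+    # delta, combining AgentTurnResponseStreamChunk, AgentTurnResponseStepProgressPayload
+    # and TextDelta as defined above (ids and text are placeholders).
+    #   event:
+    #     payload:
+    #       event_type: step_progress
+    #       step_type: inference
+    #       step_id: step_1
+    #       delta:
+    #         type: text
+    #         text: "Par"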
+ AgentTurnResponseTurnCompletePayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: turn_complete + default: turn_complete + description: Type of event being reported + turn: + $ref: '#/components/schemas/Turn' + description: >- + Complete turn data including all steps and results + additionalProperties: false + required: + - event_type + - turn + title: AgentTurnResponseTurnCompletePayload + description: >- + Payload for turn completion events in agent turn responses. + AgentTurnResponseTurnStartPayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: turn_start + default: turn_start + description: Type of event being reported + turn_id: + type: string + description: >- + Unique identifier for the turn within a session + additionalProperties: false + required: + - event_type + - turn_id + title: AgentTurnResponseTurnStartPayload + description: >- + Payload for turn start events in agent turn responses. + ImageDelta: + type: object + properties: + type: + type: string + const: image + default: image + description: >- + Discriminator type of the delta. Always "image" + image: + type: string + contentEncoding: base64 + description: The incremental image data as bytes + additionalProperties: false + required: + - type + - image + title: ImageDelta + description: >- + An image content delta for streaming responses. + TextDelta: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Discriminator type of the delta. Always "text" + text: + type: string + description: The incremental text content + additionalProperties: false + required: + - type + - text + title: TextDelta + description: >- + A text content delta for streaming responses. + ToolCallDelta: + type: object + properties: + type: + type: string + const: tool_call + default: tool_call + description: >- + Discriminator type of the delta. Always "tool_call" + tool_call: + oneOf: + - type: string + - $ref: '#/components/schemas/ToolCall' + description: >- + Either an in-progress tool call string or the final parsed tool call + parse_status: + type: string + enum: + - started + - in_progress + - failed + - succeeded + description: Current parsing status of the tool call + additionalProperties: false + required: + - type + - tool_call + - parse_status + title: ToolCallDelta + description: >- + A tool call content delta for streaming responses. + ResumeAgentTurnRequest: + type: object + properties: + tool_responses: + type: array + items: + $ref: '#/components/schemas/ToolResponse' + description: >- + The tool call responses to resume the turn with. + stream: + type: boolean + description: Whether to stream the response. 
+ additionalProperties: false + required: + - tool_responses + title: ResumeAgentTurnRequest + AgentStepResponse: + type: object + properties: + step: + oneOf: + - $ref: '#/components/schemas/InferenceStep' + - $ref: '#/components/schemas/ToolExecutionStep' + - $ref: '#/components/schemas/ShieldCallStep' + - $ref: '#/components/schemas/MemoryRetrievalStep' + discriminator: + propertyName: step_type + mapping: + inference: '#/components/schemas/InferenceStep' + tool_execution: '#/components/schemas/ToolExecutionStep' + shield_call: '#/components/schemas/ShieldCallStep' + memory_retrieval: '#/components/schemas/MemoryRetrievalStep' + description: >- + The complete step data and execution details + additionalProperties: false + required: + - step + title: AgentStepResponse + description: >- + Response containing details of a specific agent step. + AppendRowsRequest: + type: object + properties: + rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The rows to append to the dataset. + additionalProperties: false + required: + - rows + title: AppendRowsRequest + Dataset: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: dataset + default: dataset + description: >- + Type of resource, always 'dataset' for datasets + purpose: + type: string + enum: + - post-training/messages + - eval/question-answer + - eval/messages-answer + description: >- + Purpose of the dataset indicating its intended use + source: + oneOf: + - $ref: '#/components/schemas/URIDataSource' + - $ref: '#/components/schemas/RowsDataSource' + discriminator: + propertyName: type + mapping: + uri: '#/components/schemas/URIDataSource' + rows: '#/components/schemas/RowsDataSource' + description: >- + Data source configuration for the dataset + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Additional metadata for the dataset + additionalProperties: false + required: + - identifier + - provider_id + - type + - purpose + - source + - metadata + title: Dataset + description: >- + Dataset resource for storing and accessing training or evaluation data. + RowsDataSource: + type: object + properties: + type: + type: string + const: rows + default: rows + rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", + "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, + world!"}]} ] + additionalProperties: false + required: + - type + - rows + title: RowsDataSource + description: A dataset stored in rows. + URIDataSource: + type: object + properties: + type: + type: string + const: uri + default: uri + uri: + type: string + description: >- + The dataset can be obtained from a URI. E.g. 
- "https://mywebsite.com/mydata.jsonl" + - "lsfs://mydata.jsonl" - "data:csv;base64,{base64_content}" + additionalProperties: false + required: + - type + - uri + title: URIDataSource + description: >- + A dataset that can be obtained from a URI. + ListDatasetsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Dataset' + description: List of datasets + additionalProperties: false + required: + - data + title: ListDatasetsResponse + description: Response from listing datasets. + DataSource: + oneOf: + - $ref: '#/components/schemas/URIDataSource' + - $ref: '#/components/schemas/RowsDataSource' + discriminator: + propertyName: type + mapping: + uri: '#/components/schemas/URIDataSource' + rows: '#/components/schemas/RowsDataSource' + RegisterDatasetRequest: + type: object + properties: + purpose: + type: string + enum: + - post-training/messages + - eval/question-answer + - eval/messages-answer + description: >- + The purpose of the dataset. One of: - "post-training/messages": The dataset + contains a messages column with list of messages for post-training. { + "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": "assistant", + "content": "Hello, world!"}, ] } - "eval/question-answer": The dataset + contains a question column and an answer column for evaluation. { "question": + "What is the capital of France?", "answer": "Paris" } - "eval/messages-answer": + The dataset contains a messages column with list of messages and an answer + column for evaluation. { "messages": [ {"role": "user", "content": "Hello, + my name is John Doe."}, {"role": "assistant", "content": "Hello, John + Doe. How can I help you today?"}, {"role": "user", "content": "What's + my name?"}, ], "answer": "John Doe" } + source: + $ref: '#/components/schemas/DataSource' + description: >- + The data source of the dataset. Ensure that the data source schema is + compatible with the purpose of the dataset. Examples: - { "type": "uri", + "uri": "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri": + "lsfs://mydata.jsonl" } - { "type": "uri", "uri": "data:csv;base64,{base64_content}" + } - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" + } - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content": + "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}, ] + } ] } + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The metadata for the dataset. - E.g. {"description": "My dataset"}. + dataset_id: + type: string + description: >- + The ID of the dataset. If not provided, an ID will be generated. 
+ additionalProperties: false + required: + - purpose + - source + title: RegisterDatasetRequest + Benchmark: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: benchmark + default: benchmark + description: The resource type, always benchmark + dataset_id: + type: string + description: >- + Identifier of the dataset to use for the benchmark evaluation + scoring_functions: + type: array + items: + type: string + description: >- + List of scoring function identifiers to apply during evaluation + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Metadata for this evaluation task + additionalProperties: false + required: + - identifier + - provider_id + - type + - dataset_id + - scoring_functions + - metadata + title: Benchmark + description: >- + A benchmark resource for evaluating model performance. + ListBenchmarksResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Benchmark' + additionalProperties: false + required: + - data + title: ListBenchmarksResponse + RegisterBenchmarkRequest: + type: object + properties: + benchmark_id: + type: string + description: The ID of the benchmark to register. + dataset_id: + type: string + description: >- + The ID of the dataset to use for the benchmark. + scoring_functions: + type: array + items: + type: string + description: >- + The scoring functions to use for the benchmark. + provider_benchmark_id: + type: string + description: >- + The ID of the provider benchmark to use for the benchmark. + provider_id: + type: string + description: >- + The ID of the provider to use for the benchmark. + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The metadata to use for the benchmark. + additionalProperties: false + required: + - benchmark_id + - dataset_id + - scoring_functions + title: RegisterBenchmarkRequest + AgentCandidate: + type: object + properties: + type: + type: string + const: agent + default: agent + config: + $ref: '#/components/schemas/AgentConfig' + description: >- + The configuration for the agent candidate. + additionalProperties: false + required: + - type + - config + title: AgentCandidate + description: An agent candidate for evaluation. + AggregationFunctionType: + type: string + enum: + - average + - weighted_average + - median + - categorical_count + - accuracy + title: AggregationFunctionType + description: >- + Types of aggregation functions for scoring results. + BasicScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: basic + default: basic + description: >- + The type of scoring function parameters, always basic + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - aggregation_functions + title: BasicScoringFnParams + description: >- + Parameters for basic scoring function configuration. 
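+    # Illustrative only: a minimal RegisterBenchmarkRequest body. Only benchmark_id,
+    # dataset_id and scoring_functions are required; all identifiers are placeholders.
+    #   benchmark_id: my-qa-benchmark
+    #   dataset_id: my-eval-dataset
+    #   scoring_functions:
+    #     - my-accuracy-scorer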
+ BenchmarkConfig: + type: object + properties: + eval_candidate: + oneOf: + - $ref: '#/components/schemas/ModelCandidate' + - $ref: '#/components/schemas/AgentCandidate' + discriminator: + propertyName: type + mapping: + model: '#/components/schemas/ModelCandidate' + agent: '#/components/schemas/AgentCandidate' + description: The candidate to evaluate. + scoring_params: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringFnParams' + description: >- + Map between scoring function id and parameters for each scoring function + you want to run + num_examples: + type: integer + description: >- + (Optional) The number of examples to evaluate. If not provided, all examples + in the dataset will be evaluated + additionalProperties: false + required: + - eval_candidate + - scoring_params + title: BenchmarkConfig + description: >- + A benchmark configuration for evaluation. + LLMAsJudgeScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: llm_as_judge + default: llm_as_judge + description: >- + The type of scoring function parameters, always llm_as_judge + judge_model: + type: string + description: >- + Identifier of the LLM model to use as a judge for scoring + prompt_template: + type: string + description: >- + (Optional) Custom prompt template for the judge model + judge_score_regexes: + type: array + items: + type: string + description: >- + Regexes to extract the answer from generated response + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - judge_model + - judge_score_regexes + - aggregation_functions + title: LLMAsJudgeScoringFnParams + description: >- + Parameters for LLM-as-judge scoring function configuration. + ModelCandidate: + type: object + properties: + type: + type: string + const: model + default: model + model: + type: string + description: The model ID to evaluate. + sampling_params: + $ref: '#/components/schemas/SamplingParams' + description: The sampling parameters for the model. + system_message: + $ref: '#/components/schemas/SystemMessage' + description: >- + (Optional) The system message providing instructions or context to the + model. + additionalProperties: false + required: + - type + - model + - sampling_params + title: ModelCandidate + description: A model candidate for evaluation. + RegexParserScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: regex_parser + default: regex_parser + description: >- + The type of scoring function parameters, always regex_parser + parsing_regexes: + type: array + items: + type: string + description: >- + Regex to extract the answer from generated response + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - parsing_regexes + - aggregation_functions + title: RegexParserScoringFnParams + description: >- + Parameters for regex parser scoring function configuration. 
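+    # Illustrative only: a BenchmarkConfig evaluating a model candidate with an
+    # LLM-as-judge scoring function. Model ids, the scoring-function key and the
+    # regex are placeholders, not values defined by this spec.
+    #   eval_candidate:
+    #     type: model
+    #     model: meta-llama/Llama-3.1-8B-Instruct
+    #     sampling_params:
+    #       strategy: { type: greedy }
+    #   scoring_params:
+    #     my-judge-scorer:
+    #       type: llm_as_judge
+    #       judge_model: meta-llama/Llama-3.1-70B-Instruct
+    #       judge_score_regexes: ["Score: (\d+)"]
+    #       aggregation_functions: [average]
+    #   num_examples: 10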
+ ScoringFnParams: + oneOf: + - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' + - $ref: '#/components/schemas/RegexParserScoringFnParams' + - $ref: '#/components/schemas/BasicScoringFnParams' + discriminator: + propertyName: type + mapping: + llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' + regex_parser: '#/components/schemas/RegexParserScoringFnParams' + basic: '#/components/schemas/BasicScoringFnParams' + ScoringFnParamsType: + type: string + enum: + - llm_as_judge + - regex_parser + - basic + title: ScoringFnParamsType + description: >- + Types of scoring function parameter configurations. + SystemMessage: + type: object + properties: + role: + type: string + const: system + default: system + description: >- + Must be "system" to identify this as a system message + content: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the "system prompt". If multiple system messages are provided, + they are concatenated. The underlying Llama Stack code may also add other + system messages (for example, for formatting tool definitions). + additionalProperties: false + required: + - role + - content + title: SystemMessage + description: >- + A system message providing instructions or context to the model. + EvaluateRowsRequest: + type: object + properties: + input_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The rows to evaluate. + scoring_functions: + type: array + items: + type: string + description: >- + The scoring functions to use for the evaluation. + benchmark_config: + $ref: '#/components/schemas/BenchmarkConfig' + description: The configuration for the benchmark. + additionalProperties: false + required: + - input_rows + - scoring_functions + - benchmark_config + title: EvaluateRowsRequest + EvaluateResponse: + type: object + properties: + generations: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The generations from the evaluation. + scores: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringResult' + description: The scores from the evaluation. + additionalProperties: false + required: + - generations + - scores + title: EvaluateResponse + description: The response from an evaluation. + ScoringResult: + type: object + properties: + score_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The scoring result for each row. Each row is a map of column name to value. + aggregated_results: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Map of metric name to aggregated value + additionalProperties: false + required: + - score_rows + - aggregated_results + title: ScoringResult + description: A scoring result for a single row. + RunEvalRequest: + type: object + properties: + benchmark_config: + $ref: '#/components/schemas/BenchmarkConfig' + description: The configuration for the benchmark. 
+ additionalProperties: false + required: + - benchmark_config + title: RunEvalRequest + Job: + type: object + properties: + job_id: + type: string + description: Unique identifier for the job + status: + type: string + enum: + - completed + - in_progress + - failed + - scheduled + - cancelled + description: Current execution status of the job + additionalProperties: false + required: + - job_id + - status + title: Job + description: >- + A job execution instance with status tracking. + Checkpoint: + type: object + properties: + identifier: + type: string + description: Unique identifier for the checkpoint + created_at: + type: string + format: date-time + description: >- + Timestamp when the checkpoint was created + epoch: + type: integer + description: >- + Training epoch when the checkpoint was saved + post_training_job_id: + type: string + description: >- + Identifier of the training job that created this checkpoint + path: + type: string + description: >- + File system path where the checkpoint is stored + training_metrics: + $ref: '#/components/schemas/PostTrainingMetric' + description: >- + (Optional) Training metrics associated with this checkpoint + additionalProperties: false + required: + - identifier + - created_at + - epoch + - post_training_job_id + - path + title: Checkpoint + description: Checkpoint created during training runs. + PostTrainingJobArtifactsResponse: + type: object + properties: + job_uuid: + type: string + description: Unique identifier for the training job + checkpoints: + type: array + items: + $ref: '#/components/schemas/Checkpoint' + description: >- + List of model checkpoints created during training + additionalProperties: false + required: + - job_uuid + - checkpoints + title: PostTrainingJobArtifactsResponse + description: Artifacts of a finetuning job. + PostTrainingMetric: + type: object + properties: + epoch: + type: integer + description: Training epoch number + train_loss: + type: number + description: Loss value on the training dataset + validation_loss: + type: number + description: Loss value on the validation dataset + perplexity: + type: number + description: >- + Perplexity metric indicating model confidence + additionalProperties: false + required: + - epoch + - train_loss + - validation_loss + - perplexity + title: PostTrainingMetric + description: >- + Training metrics captured during post-training jobs. + CancelTrainingJobRequest: + type: object + properties: + job_uuid: + type: string + description: The UUID of the job to cancel. 
+ additionalProperties: false + required: + - job_uuid + title: CancelTrainingJobRequest + PostTrainingJobStatusResponse: + type: object + properties: + job_uuid: + type: string + description: Unique identifier for the training job + status: + type: string + enum: + - completed + - in_progress + - failed + - scheduled + - cancelled + description: Current status of the training job + scheduled_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the job was scheduled + started_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the job execution began + completed_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the job finished, if completed + resources_allocated: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Information about computational resources allocated to the + job + checkpoints: + type: array + items: + $ref: '#/components/schemas/Checkpoint' + description: >- + List of model checkpoints created during training + additionalProperties: false + required: + - job_uuid + - status + - checkpoints + title: PostTrainingJobStatusResponse + description: Status of a finetuning job. + ListPostTrainingJobsResponse: + type: object + properties: + data: + type: array + items: + type: object + properties: + job_uuid: + type: string + additionalProperties: false + required: + - job_uuid + title: PostTrainingJob + additionalProperties: false + required: + - data + title: ListPostTrainingJobsResponse + DPOAlignmentConfig: + type: object + properties: + beta: + type: number + description: Temperature parameter for the DPO loss + loss_type: + $ref: '#/components/schemas/DPOLossType' + default: sigmoid + description: The type of loss function to use for DPO + additionalProperties: false + required: + - beta + - loss_type + title: DPOAlignmentConfig + description: >- + Configuration for Direct Preference Optimization (DPO) alignment. + DPOLossType: + type: string + enum: + - sigmoid + - hinge + - ipo + - kto_pair + title: DPOLossType + DataConfig: + type: object + properties: + dataset_id: + type: string + description: >- + Unique identifier for the training dataset + batch_size: + type: integer + description: Number of samples per training batch + shuffle: + type: boolean + description: >- + Whether to shuffle the dataset during training + data_format: + $ref: '#/components/schemas/DatasetFormat' + description: >- + Format of the dataset (instruct or dialog) + validation_dataset_id: + type: string + description: >- + (Optional) Unique identifier for the validation dataset + packed: + type: boolean + default: false + description: >- + (Optional) Whether to pack multiple samples into a single sequence for + efficiency + train_on_input: + type: boolean + default: false + description: >- + (Optional) Whether to compute loss on input tokens as well as output tokens + additionalProperties: false + required: + - dataset_id + - batch_size + - shuffle + - data_format + title: DataConfig + description: >- + Configuration for training data and data loading. + DatasetFormat: + type: string + enum: + - instruct + - dialog + title: DatasetFormat + description: Format of the training dataset. 
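+    # Illustrative only: a DataConfig for supervised fine-tuning on an instruct-format
+    # dataset (dataset identifiers are placeholders).
+    #   dataset_id: my-training-dataset
+    #   batch_size: 8
+    #   shuffle: true
+    #   data_format: instruct
+    #   validation_dataset_id: my-validation-dataset   # optional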
+ EfficiencyConfig: + type: object + properties: + enable_activation_checkpointing: + type: boolean + default: false + description: >- + (Optional) Whether to use activation checkpointing to reduce memory usage + enable_activation_offloading: + type: boolean + default: false + description: >- + (Optional) Whether to offload activations to CPU to save GPU memory + memory_efficient_fsdp_wrap: + type: boolean + default: false + description: >- + (Optional) Whether to use memory-efficient FSDP wrapping + fsdp_cpu_offload: + type: boolean + default: false + description: >- + (Optional) Whether to offload FSDP parameters to CPU + additionalProperties: false + title: EfficiencyConfig + description: >- + Configuration for memory and compute efficiency optimizations. + OptimizerConfig: + type: object + properties: + optimizer_type: + $ref: '#/components/schemas/OptimizerType' + description: >- + Type of optimizer to use (adam, adamw, or sgd) + lr: + type: number + description: Learning rate for the optimizer + weight_decay: + type: number + description: >- + Weight decay coefficient for regularization + num_warmup_steps: + type: integer + description: Number of steps for learning rate warmup + additionalProperties: false + required: + - optimizer_type + - lr + - weight_decay + - num_warmup_steps + title: OptimizerConfig + description: >- + Configuration parameters for the optimization algorithm. + OptimizerType: + type: string + enum: + - adam + - adamw + - sgd + title: OptimizerType + description: >- + Available optimizer algorithms for training. + TrainingConfig: + type: object + properties: + n_epochs: + type: integer + description: Number of training epochs to run + max_steps_per_epoch: + type: integer + default: 1 + description: Maximum number of steps to run per epoch + gradient_accumulation_steps: + type: integer + default: 1 + description: >- + Number of steps to accumulate gradients before updating + max_validation_steps: + type: integer + default: 1 + description: >- + (Optional) Maximum number of validation steps per epoch + data_config: + $ref: '#/components/schemas/DataConfig' + description: >- + (Optional) Configuration for data loading and formatting + optimizer_config: + $ref: '#/components/schemas/OptimizerConfig' + description: >- + (Optional) Configuration for the optimization algorithm + efficiency_config: + $ref: '#/components/schemas/EfficiencyConfig' + description: >- + (Optional) Configuration for memory and compute optimizations + dtype: + type: string + default: bf16 + description: >- + (Optional) Data type for model parameters (bf16, fp16, fp32) + additionalProperties: false + required: + - n_epochs + - max_steps_per_epoch + - gradient_accumulation_steps + title: TrainingConfig + description: >- + Comprehensive configuration for the training process. + PreferenceOptimizeRequest: + type: object + properties: + job_uuid: + type: string + description: The UUID of the job to create. + finetuned_model: + type: string + description: The model to fine-tune. + algorithm_config: + $ref: '#/components/schemas/DPOAlignmentConfig' + description: The algorithm configuration. + training_config: + $ref: '#/components/schemas/TrainingConfig' + description: The training configuration. + hyperparam_search_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The hyperparam search configuration. 
+ logger_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The logger configuration. + additionalProperties: false + required: + - job_uuid + - finetuned_model + - algorithm_config + - training_config + - hyperparam_search_config + - logger_config + title: PreferenceOptimizeRequest + PostTrainingJob: + type: object + properties: + job_uuid: + type: string + additionalProperties: false + required: + - job_uuid + title: PostTrainingJob + AlgorithmConfig: + oneOf: + - $ref: '#/components/schemas/LoraFinetuningConfig' + - $ref: '#/components/schemas/QATFinetuningConfig' + discriminator: + propertyName: type + mapping: + LoRA: '#/components/schemas/LoraFinetuningConfig' + QAT: '#/components/schemas/QATFinetuningConfig' + LoraFinetuningConfig: + type: object + properties: + type: + type: string + const: LoRA + default: LoRA + description: Algorithm type identifier, always "LoRA" + lora_attn_modules: + type: array + items: + type: string + description: >- + List of attention module names to apply LoRA to + apply_lora_to_mlp: + type: boolean + description: Whether to apply LoRA to MLP layers + apply_lora_to_output: + type: boolean + description: >- + Whether to apply LoRA to output projection layers + rank: + type: integer + description: >- + Rank of the LoRA adaptation (lower rank = fewer parameters) + alpha: + type: integer + description: >- + LoRA scaling parameter that controls adaptation strength + use_dora: + type: boolean + default: false + description: >- + (Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation) + quantize_base: + type: boolean + default: false + description: >- + (Optional) Whether to quantize the base model weights + additionalProperties: false + required: + - type + - lora_attn_modules + - apply_lora_to_mlp + - apply_lora_to_output + - rank + - alpha + title: LoraFinetuningConfig + description: >- + Configuration for Low-Rank Adaptation (LoRA) fine-tuning. + QATFinetuningConfig: + type: object + properties: + type: + type: string + const: QAT + default: QAT + description: Algorithm type identifier, always "QAT" + quantizer_name: + type: string + description: >- + Name of the quantization algorithm to use + group_size: + type: integer + description: Size of groups for grouped quantization + additionalProperties: false + required: + - type + - quantizer_name + - group_size + title: QATFinetuningConfig + description: >- + Configuration for Quantization-Aware Training (QAT) fine-tuning. + SupervisedFineTuneRequest: + type: object + properties: + job_uuid: + type: string + description: The UUID of the job to create. + training_config: + $ref: '#/components/schemas/TrainingConfig' + description: The training configuration. + hyperparam_search_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The hyperparam search configuration. + logger_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The logger configuration. + model: + type: string + description: The model to fine-tune. + checkpoint_dir: + type: string + description: The directory to save checkpoint(s) to. + algorithm_config: + $ref: '#/components/schemas/AlgorithmConfig' + description: The algorithm configuration. 
+ additionalProperties: false + required: + - job_uuid + - training_config + - hyperparam_search_config + - logger_config + title: SupervisedFineTuneRequest + QueryMetricsRequest: + type: object + properties: + start_time: + type: integer + description: The start time of the metric to query. + end_time: + type: integer + description: The end time of the metric to query. + granularity: + type: string + description: The granularity of the metric to query. + query_type: + type: string + enum: + - range + - instant + description: The type of query to perform. + label_matchers: + type: array + items: + type: object + properties: + name: + type: string + description: The name of the label to match + value: + type: string + description: The value to match against + operator: + type: string + enum: + - '=' + - '!=' + - =~ + - '!~' + description: >- + The comparison operator to use for matching + default: '=' + additionalProperties: false + required: + - name + - value + - operator + title: MetricLabelMatcher + description: >- + A matcher for filtering metrics by label values. + description: >- + The label matchers to apply to the metric. + additionalProperties: false + required: + - start_time + - query_type + title: QueryMetricsRequest + MetricDataPoint: + type: object + properties: + timestamp: + type: integer + description: >- + Unix timestamp when the metric value was recorded + value: + type: number + description: >- + The numeric value of the metric at this timestamp + unit: + type: string + additionalProperties: false + required: + - timestamp + - value + - unit + title: MetricDataPoint + description: >- + A single data point in a metric time series. + MetricLabel: + type: object + properties: + name: + type: string + description: The name of the label + value: + type: string + description: The value of the label + additionalProperties: false + required: + - name + - value + title: MetricLabel + description: A label associated with a metric. + MetricSeries: + type: object + properties: + metric: + type: string + description: The name of the metric + labels: + type: array + items: + $ref: '#/components/schemas/MetricLabel' + description: >- + List of labels associated with this metric series + values: + type: array + items: + $ref: '#/components/schemas/MetricDataPoint' + description: >- + List of data points in chronological order + additionalProperties: false + required: + - metric + - labels + - values + title: MetricSeries + description: A time series of metric data points. + QueryMetricsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/MetricSeries' + description: >- + List of metric series matching the query criteria + additionalProperties: false + required: + - data + title: QueryMetricsResponse + description: >- + Response containing metric time series data. + QueryCondition: + type: object + properties: + key: + type: string + description: The attribute key to filter on + op: + $ref: '#/components/schemas/QueryConditionOp' + description: The comparison operator to apply + value: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The value to compare against + additionalProperties: false + required: + - key + - op + - value + title: QueryCondition + description: A condition for filtering query results. + QueryConditionOp: + type: string + enum: + - eq + - ne + - gt + - lt + title: QueryConditionOp + description: >- + Comparison operators for query conditions. 
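To show how QueryMetricsRequest and its MetricLabelMatcher entries fit together, here is a minimal sketch that posts a range query to the /v1alpha/telemetry/metrics/{metric_name} endpoint defined later in this spec. The host is the spec's placeholder server URL, and the metric name and label values are invented for illustration.

import requests

BASE_URL = "http://any-hosted-llama-stack.com"  # placeholder host from the spec's servers entry

# Minimal QueryMetricsRequest: only start_time and query_type are required.
# The metric name and the label matcher below are invented for illustration.
body = {
    "start_time": 1730000000,        # Unix seconds
    "end_time": 1730003600,
    "query_type": "range",           # "range" or "instant"
    "label_matchers": [
        {"name": "model_id", "value": "llama-3", "operator": "="},
    ],
}
resp = requests.post(f"{BASE_URL}/v1alpha/telemetry/metrics/prompt_tokens", json=body)
resp.raise_for_status()
for series in resp.json()["data"]:   # QueryMetricsResponse.data -> MetricSeries
    print(series["metric"], len(series["values"]))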
+ QuerySpansRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the spans. + attributes_to_return: + type: array + items: + type: string + description: The attributes to return in the spans. + max_depth: + type: integer + description: The maximum depth of the tree. + additionalProperties: false + required: + - attribute_filters + - attributes_to_return + title: QuerySpansRequest + Span: + type: object + properties: + span_id: + type: string + description: Unique identifier for the span + trace_id: + type: string + description: >- + Unique identifier for the trace this span belongs to + parent_span_id: + type: string + description: >- + (Optional) Unique identifier for the parent span, if this is a child span + name: + type: string + description: >- + Human-readable name describing the operation this span represents + start_time: + type: string + format: date-time + description: Timestamp when the operation began + end_time: + type: string + format: date-time + description: >- + (Optional) Timestamp when the operation finished, if completed + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Key-value pairs containing additional metadata about the span + additionalProperties: false + required: + - span_id + - trace_id + - name + - start_time + title: Span + description: >- + A span representing a single operation within a trace. + QuerySpansResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Span' + description: >- + List of spans matching the query criteria + additionalProperties: false + required: + - data + title: QuerySpansResponse + description: Response containing a list of spans. + SaveSpansToDatasetRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the spans. + attributes_to_save: + type: array + items: + type: string + description: The attributes to save to the dataset. + dataset_id: + type: string + description: >- + The ID of the dataset to save the spans to. + max_depth: + type: integer + description: The maximum depth of the tree. + additionalProperties: false + required: + - attribute_filters + - attributes_to_save + - dataset_id + title: SaveSpansToDatasetRequest + GetSpanTreeRequest: + type: object + properties: + attributes_to_return: + type: array + items: + type: string + description: The attributes to return in the tree. + max_depth: + type: integer + description: The maximum depth of the tree. + additionalProperties: false + title: GetSpanTreeRequest + SpanStatus: + type: string + enum: + - ok + - error + title: SpanStatus + description: >- + The status of a span indicating whether it completed successfully or with + an error. 
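A minimal sketch of a span query built from the QuerySpansRequest and QueryCondition schemas above, posted to the /v1alpha/telemetry/spans endpoint defined later in this spec. The attribute key and value are invented, and the host is the spec's placeholder server URL.

import requests

BASE_URL = "http://any-hosted-llama-stack.com"  # placeholder host from the spec's servers entry

# QuerySpansRequest: attribute_filters and attributes_to_return are required.
# The attribute key/value pair is invented for illustration.
body = {
    "attribute_filters": [
        {"key": "session_id", "op": "eq", "value": "abc-123"},
    ],
    "attributes_to_return": ["session_id"],
    "max_depth": 2,                  # optional
}
resp = requests.post(f"{BASE_URL}/v1alpha/telemetry/spans", json=body)
resp.raise_for_status()
for span in resp.json()["data"]:     # QuerySpansResponse.data -> Span
    print(span["span_id"], span["name"], span["start_time"])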
+ SpanWithStatus: + type: object + properties: + span_id: + type: string + description: Unique identifier for the span + trace_id: + type: string + description: >- + Unique identifier for the trace this span belongs to + parent_span_id: + type: string + description: >- + (Optional) Unique identifier for the parent span, if this is a child span + name: + type: string + description: >- + Human-readable name describing the operation this span represents + start_time: + type: string + format: date-time + description: Timestamp when the operation began + end_time: + type: string + format: date-time + description: >- + (Optional) Timestamp when the operation finished, if completed + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Key-value pairs containing additional metadata about the span + status: + $ref: '#/components/schemas/SpanStatus' + description: >- + (Optional) The current status of the span + additionalProperties: false + required: + - span_id + - trace_id + - name + - start_time + title: SpanWithStatus + description: A span that includes status information. + QuerySpanTreeResponse: + type: object + properties: + data: + type: object + additionalProperties: + $ref: '#/components/schemas/SpanWithStatus' + description: >- + Dictionary mapping span IDs to spans with status information + additionalProperties: false + required: + - data + title: QuerySpanTreeResponse + description: >- + Response containing a tree structure of spans. + QueryTracesRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the traces. + limit: + type: integer + description: The limit of traces to return. + offset: + type: integer + description: The offset of the traces to return. + order_by: + type: array + items: + type: string + description: The order by of the traces to return. + additionalProperties: false + title: QueryTracesRequest + Trace: + type: object + properties: + trace_id: + type: string + description: Unique identifier for the trace + root_span_id: + type: string + description: >- + Unique identifier for the root span that started this trace + start_time: + type: string + format: date-time + description: Timestamp when the trace began + end_time: + type: string + format: date-time + description: >- + (Optional) Timestamp when the trace finished, if completed + additionalProperties: false + required: + - trace_id + - root_span_id + - start_time + title: Trace + description: >- + A trace representing the complete execution path of a request across multiple + operations. + QueryTracesResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Trace' + description: >- + List of traces matching the query criteria + additionalProperties: false + required: + - data + title: QueryTracesResponse + description: Response containing a list of traces. 
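QueryTracesRequest has no required fields, so a trace listing only needs the optional paging knobs. A minimal sketch against the /v1alpha/telemetry/traces endpoint defined later in this spec; the host is the spec's placeholder server URL and the order_by value is an assumed attribute name.

import requests

BASE_URL = "http://any-hosted-llama-stack.com"  # placeholder host from the spec's servers entry

# QueryTracesRequest: all fields are optional; "start_time" as an order_by
# value is an assumption for illustration.
body = {"limit": 10, "offset": 0, "order_by": ["start_time"]}
resp = requests.post(f"{BASE_URL}/v1alpha/telemetry/traces", json=body)
resp.raise_for_status()
for trace in resp.json()["data"]:    # QueryTracesResponse.data -> Trace
    print(trace["trace_id"], trace["root_span_id"], trace["start_time"])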
+  responses:
+    BadRequest400:
+      description: The request was invalid or malformed
+      content:
+        application/json:
+          schema:
+            $ref: '#/components/schemas/Error'
+          example:
+            status: 400
+            title: Bad Request
+            detail: The request was invalid or malformed
+    TooManyRequests429:
+      description: >-
+        The client has sent too many requests in a given amount of time
+      content:
+        application/json:
+          schema:
+            $ref: '#/components/schemas/Error'
+          example:
+            status: 429
+            title: Too Many Requests
+            detail: >-
+              You have exceeded the rate limit. Please try again later.
+    InternalServerError500:
+      description: >-
+        The server encountered an unexpected error
+      content:
+        application/json:
+          schema:
+            $ref: '#/components/schemas/Error'
+          example:
+            status: 500
+            title: Internal Server Error
+            detail: >-
+              An unexpected error occurred. Our team has been notified.
+    DefaultError:
+      description: An unexpected error occurred
+      content:
+        application/json:
+          schema:
+            $ref: '#/components/schemas/Error'
+          example:
+            status: 0
+            title: Error
+            detail: An unexpected error occurred
+security:
+  - Default: []
+tags:
+  - name: Agents
+    description: >
+      APIs for creating and interacting with agentic systems.
+
+
+      ## Deprecated APIs
+
+
+      > **⚠️ DEPRECATED**: These APIs are provided for migration reference and will
+      be removed in future versions. Not recommended for new projects.
+
+
+      ### Migration Guidance
+
+
+      If you are using deprecated versions of the Agents or Responses APIs, please
+      migrate to:
+
+
+      - **Responses API**: Use the stable v1 Responses API endpoints
+    x-displayName: Agents
+  - name: Benchmarks
+    description: ''
+  - name: DatasetIO
+    description: ''
+  - name: Datasets
+    description: ''
+  - name: Eval
+    description: ''
+    x-displayName: >-
+      Llama Stack Evaluation API for running evaluations on model and agent candidates.
+  - name: PostTraining (Coming Soon)
+    description: ''
+  - name: Telemetry
+    description: ''
+x-tagGroups:
+  - name: Operations
+    tags:
+      - Agents
+      - Benchmarks
+      - DatasetIO
+      - Datasets
+      - Eval
+      - PostTraining (Coming Soon)
+      - Telemetry
diff --git a/docs/static/experimental-llama-stack-spec.html b/docs/static/experimental-llama-stack-spec.html
new file mode 100644
index 000000000..fe57f9132
--- /dev/null
+++ b/docs/static/experimental-llama-stack-spec.html
@@ -0,0 +1,6536 @@
diff --git a/docs/static/experimental-llama-stack-spec.yaml b/docs/static/experimental-llama-stack-spec.yaml
new file mode 100644
index 000000000..85129336f
--- /dev/null
+++ b/docs/static/experimental-llama-stack-spec.yaml
@@ -0,0 +1,4847 @@
+openapi: 3.1.0
+info:
+  title: >-
+    Llama Stack Specification - Experimental APIs
+  version: v1
+  description: >-
+    This is the specification of the Llama Stack that provides
+    a set of endpoints and their corresponding interfaces that are
+    tailored to
+    best leverage Llama Models.
+
+    **🧪 EXPERIMENTAL**: Pre-release APIs (v1alpha, v1beta) that may change before
+    becoming stable.
+servers:
+  - url: http://any-hosted-llama-stack.com
+paths:
+  /v1beta/datasetio/append-rows/{dataset_id}:
+    post:
+      responses:
+        '200':
+          description: OK
+        '400':
+          $ref: '#/components/responses/BadRequest400'
+        '429':
+          $ref: >-
+            #/components/responses/TooManyRequests429
+        '500':
+          $ref: >-
+            #/components/responses/InternalServerError500
+        default:
+          $ref: '#/components/responses/DefaultError'
+      tags:
+        - DatasetIO
+      summary: Append rows to a dataset.
+      description: Append rows to a dataset.
+ parameters: + - name: dataset_id + in: path + description: >- + The ID of the dataset to append the rows to. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/AppendRowsRequest' + required: true + deprecated: false + /v1beta/datasetio/iterrows/{dataset_id}: + get: + responses: + '200': + description: A PaginatedResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - DatasetIO + summary: >- + Get a paginated list of rows from a dataset. + description: >- + Get a paginated list of rows from a dataset. + + Uses offset-based pagination where: + + - start_index: The starting index (0-based). If None, starts from beginning. + + - limit: Number of items to return. If None or -1, returns all items. + + + The response includes: + + - data: List of items for the current page. + + - has_more: Whether there are more items available after this set. + parameters: + - name: dataset_id + in: path + description: >- + The ID of the dataset to get the rows from. + required: true + schema: + type: string + - name: start_index + in: query + description: >- + Index into dataset for the first row to get. Get all rows if None. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of rows to get. + required: false + schema: + type: integer + deprecated: false + /v1beta/datasets: + get: + responses: + '200': + description: A ListDatasetsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListDatasetsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: List all datasets. + description: List all datasets. + parameters: [] + deprecated: false + post: + responses: + '200': + description: A Dataset. + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: Register a new dataset. + description: Register a new dataset. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterDatasetRequest' + required: true + deprecated: false + /v1beta/datasets/{dataset_id}: + get: + responses: + '200': + description: A Dataset. + content: + application/json: + schema: + $ref: '#/components/schemas/Dataset' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: Get a dataset by its ID. + description: Get a dataset by its ID. + parameters: + - name: dataset_id + in: path + description: The ID of the dataset to get. 
+ required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Datasets + summary: Unregister a dataset by its ID. + description: Unregister a dataset by its ID. + parameters: + - name: dataset_id + in: path + description: The ID of the dataset to unregister. + required: true + schema: + type: string + deprecated: false + /v1alpha/agents: + get: + responses: + '200': + description: A PaginatedResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: List all agents. + description: List all agents. + parameters: + - name: start_index + in: query + description: The index to start the pagination from. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of agents to return. + required: false + schema: + type: integer + deprecated: false + post: + responses: + '200': + description: >- + An AgentCreateResponse with the agent ID. + content: + application/json: + schema: + $ref: '#/components/schemas/AgentCreateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Create an agent with the given configuration. + description: >- + Create an agent with the given configuration. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAgentRequest' + required: true + deprecated: false + /v1alpha/agents/{agent_id}: + get: + responses: + '200': + description: An Agent of the agent. + content: + application/json: + schema: + $ref: '#/components/schemas/Agent' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Describe an agent by its ID. + description: Describe an agent by its ID. + parameters: + - name: agent_id + in: path + description: ID of the agent. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Delete an agent by its ID and its associated sessions and turns. + description: >- + Delete an agent by its ID and its associated sessions and turns. + parameters: + - name: agent_id + in: path + description: The ID of the agent to delete. + required: true + schema: + type: string + deprecated: false + /v1alpha/agents/{agent_id}/session: + post: + responses: + '200': + description: An AgentSessionCreateResponse. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/AgentSessionCreateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Create a new session for an agent. + description: Create a new session for an agent. + parameters: + - name: agent_id + in: path + description: >- + The ID of the agent to create the session for. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAgentSessionRequest' + required: true + deprecated: false + /v1alpha/agents/{agent_id}/session/{session_id}: + get: + responses: + '200': + description: A Session. + content: + application/json: + schema: + $ref: '#/components/schemas/Session' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an agent session by its ID. + description: Retrieve an agent session by its ID. + parameters: + - name: session_id + in: path + description: The ID of the session to get. + required: true + schema: + type: string + - name: agent_id + in: path + description: >- + The ID of the agent to get the session for. + required: true + schema: + type: string + - name: turn_ids + in: query + description: >- + (Optional) List of turn IDs to filter the session by. + required: false + schema: + type: array + items: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Delete an agent session by its ID and its associated turns. + description: >- + Delete an agent session by its ID and its associated turns. + parameters: + - name: session_id + in: path + description: The ID of the session to delete. + required: true + schema: + type: string + - name: agent_id + in: path + description: >- + The ID of the agent to delete the session for. + required: true + schema: + type: string + deprecated: false + /v1alpha/agents/{agent_id}/session/{session_id}/turn: + post: + responses: + '200': + description: >- + If stream=False, returns a Turn object. If stream=True, returns an SSE + event stream of AgentTurnResponseStreamChunk. + content: + application/json: + schema: + $ref: '#/components/schemas/Turn' + text/event-stream: + schema: + $ref: '#/components/schemas/AgentTurnResponseStreamChunk' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Create a new turn for an agent. + description: Create a new turn for an agent. + parameters: + - name: agent_id + in: path + description: >- + The ID of the agent to create the turn for. + required: true + schema: + type: string + - name: session_id + in: path + description: >- + The ID of the session to create the turn for. 
+ required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateAgentTurnRequest' + required: true + deprecated: false + /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}: + get: + responses: + '200': + description: A Turn. + content: + application/json: + schema: + $ref: '#/components/schemas/Turn' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an agent turn by its ID. + description: Retrieve an agent turn by its ID. + parameters: + - name: agent_id + in: path + description: The ID of the agent to get the turn for. + required: true + schema: + type: string + - name: session_id + in: path + description: >- + The ID of the session to get the turn for. + required: true + schema: + type: string + - name: turn_id + in: path + description: The ID of the turn to get. + required: true + schema: + type: string + deprecated: false + /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume: + post: + responses: + '200': + description: >- + A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk + objects. + content: + application/json: + schema: + $ref: '#/components/schemas/Turn' + text/event-stream: + schema: + $ref: '#/components/schemas/AgentTurnResponseStreamChunk' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: >- + Resume an agent turn with executed tool call responses. + description: >- + Resume an agent turn with executed tool call responses. + + When a Turn has the status `awaiting_input` due to pending input from client + side tool calls, this endpoint can be used to submit the outputs from the + tool calls once they are ready. + parameters: + - name: agent_id + in: path + description: The ID of the agent to resume. + required: true + schema: + type: string + - name: session_id + in: path + description: The ID of the session to resume. + required: true + schema: + type: string + - name: turn_id + in: path + description: The ID of the turn to resume. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ResumeAgentTurnRequest' + required: true + deprecated: false + /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: + get: + responses: + '200': + description: An AgentStepResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/AgentStepResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an agent step by its ID. + description: Retrieve an agent step by its ID. + parameters: + - name: agent_id + in: path + description: The ID of the agent to get the step for. + required: true + schema: + type: string + - name: session_id + in: path + description: >- + The ID of the session to get the step for. 
+ required: true + schema: + type: string + - name: turn_id + in: path + description: The ID of the turn to get the step for. + required: true + schema: + type: string + - name: step_id + in: path + description: The ID of the step to get. + required: true + schema: + type: string + deprecated: false + /v1alpha/agents/{agent_id}/sessions: + get: + responses: + '200': + description: A PaginatedResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PaginatedResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: List all session(s) of a given agent. + description: List all session(s) of a given agent. + parameters: + - name: agent_id + in: path + description: >- + The ID of the agent to list sessions for. + required: true + schema: + type: string + - name: start_index + in: query + description: The index to start the pagination from. + required: false + schema: + type: integer + - name: limit + in: query + description: The number of sessions to return. + required: false + schema: + type: integer + deprecated: false + /v1alpha/eval/benchmarks: + get: + responses: + '200': + description: A ListBenchmarksResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListBenchmarksResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: List all benchmarks. + description: List all benchmarks. + parameters: [] + deprecated: false + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: Register a benchmark. + description: Register a benchmark. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterBenchmarkRequest' + required: true + deprecated: false + /v1alpha/eval/benchmarks/{benchmark_id}: + get: + responses: + '200': + description: A Benchmark. + content: + application/json: + schema: + $ref: '#/components/schemas/Benchmark' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: Get a benchmark by its ID. + description: Get a benchmark by its ID. + parameters: + - name: benchmark_id + in: path + description: The ID of the benchmark to get. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Benchmarks + summary: Unregister a benchmark. + description: Unregister a benchmark. 
+ parameters: + - name: benchmark_id + in: path + description: The ID of the benchmark to unregister. + required: true + schema: + type: string + deprecated: false + /v1alpha/eval/benchmarks/{benchmark_id}/evaluations: + post: + responses: + '200': + description: >- + EvaluateResponse object containing generations and scores. + content: + application/json: + schema: + $ref: '#/components/schemas/EvaluateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Evaluate a list of rows on a benchmark. + description: Evaluate a list of rows on a benchmark. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/EvaluateRowsRequest' + required: true + deprecated: false + /v1alpha/eval/benchmarks/{benchmark_id}/jobs: + post: + responses: + '200': + description: >- + The job that was created to run the evaluation. + content: + application/json: + schema: + $ref: '#/components/schemas/Job' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Run an evaluation on a benchmark. + description: Run an evaluation on a benchmark. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RunEvalRequest' + required: true + deprecated: false + /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}: + get: + responses: + '200': + description: The status of the evaluation job. + content: + application/json: + schema: + $ref: '#/components/schemas/Job' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Get the status of a job. + description: Get the status of a job. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + - name: job_id + in: path + description: The ID of the job to get the status of. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Cancel a job. + description: Cancel a job. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + - name: job_id + in: path + description: The ID of the job to cancel. 
+ required: true + schema: + type: string + deprecated: false + /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result: + get: + responses: + '200': + description: The result of the job. + content: + application/json: + schema: + $ref: '#/components/schemas/EvaluateResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Eval + summary: Get the result of a job. + description: Get the result of a job. + parameters: + - name: benchmark_id + in: path + description: >- + The ID of the benchmark to run the evaluation on. + required: true + schema: + type: string + - name: job_id + in: path + description: The ID of the job to get the result of. + required: true + schema: + type: string + deprecated: false + /v1alpha/inference/rerank: + post: + responses: + '200': + description: >- + RerankResponse with indices sorted by relevance score (descending). + content: + application/json: + schema: + $ref: '#/components/schemas/RerankResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inference + summary: >- + Rerank a list of documents based on their relevance to a query. + description: >- + Rerank a list of documents based on their relevance to a query. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RerankRequest' + required: true + deprecated: false + /v1alpha/post-training/job/artifacts: + get: + responses: + '200': + description: A PostTrainingJobArtifactsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJobArtifactsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Get the artifacts of a training job. + description: Get the artifacts of a training job. + parameters: + - name: job_uuid + in: query + description: >- + The UUID of the job to get the artifacts of. + required: true + schema: + type: string + deprecated: false + /v1alpha/post-training/job/cancel: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Cancel a training job. + description: Cancel a training job. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CancelTrainingJobRequest' + required: true + deprecated: false + /v1alpha/post-training/job/status: + get: + responses: + '200': + description: A PostTrainingJobStatusResponse. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJobStatusResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Get the status of a training job. + description: Get the status of a training job. + parameters: + - name: job_uuid + in: query + description: >- + The UUID of the job to get the status of. + required: true + schema: + type: string + deprecated: false + /v1alpha/post-training/jobs: + get: + responses: + '200': + description: A ListPostTrainingJobsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListPostTrainingJobsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Get all training jobs. + description: Get all training jobs. + parameters: [] + deprecated: false + /v1alpha/post-training/preference-optimize: + post: + responses: + '200': + description: A PostTrainingJob. + content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJob' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Run preference optimization of a model. + description: Run preference optimization of a model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/PreferenceOptimizeRequest' + required: true + deprecated: false + /v1alpha/post-training/supervised-fine-tune: + post: + responses: + '200': + description: A PostTrainingJob. + content: + application/json: + schema: + $ref: '#/components/schemas/PostTrainingJob' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - PostTraining (Coming Soon) + summary: Run supervised fine-tuning of a model. + description: Run supervised fine-tuning of a model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SupervisedFineTuneRequest' + required: true + deprecated: false + /v1alpha/telemetry/metrics/{metric_name}: + post: + responses: + '200': + description: A QueryMetricsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryMetricsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Query metrics. + description: Query metrics. + parameters: + - name: metric_name + in: path + description: The name of the metric to query. 
+ required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryMetricsRequest' + required: true + deprecated: false + /v1alpha/telemetry/spans: + post: + responses: + '200': + description: A QuerySpansResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpansResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Query spans. + description: Query spans. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpansRequest' + required: true + deprecated: false + /v1alpha/telemetry/spans/export: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Save spans to a dataset. + description: Save spans to a dataset. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SaveSpansToDatasetRequest' + required: true + deprecated: false + /v1alpha/telemetry/spans/{span_id}/tree: + post: + responses: + '200': + description: A QuerySpanTreeResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QuerySpanTreeResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Get a span tree by its ID. + description: Get a span tree by its ID. + parameters: + - name: span_id + in: path + description: The ID of the span to get the tree from. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/GetSpanTreeRequest' + required: true + deprecated: false + /v1alpha/telemetry/traces: + post: + responses: + '200': + description: A QueryTracesResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/QueryTracesResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Query traces. + description: Query traces. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/QueryTracesRequest' + required: true + deprecated: false + /v1alpha/telemetry/traces/{trace_id}: + get: + responses: + '200': + description: A Trace. + content: + application/json: + schema: + $ref: '#/components/schemas/Trace' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Get a trace by its ID. + description: Get a trace by its ID. + parameters: + - name: trace_id + in: path + description: The ID of the trace to get. 
+ required: true + schema: + type: string + deprecated: false + /v1alpha/telemetry/traces/{trace_id}/spans/{span_id}: + get: + responses: + '200': + description: A Span. + content: + application/json: + schema: + $ref: '#/components/schemas/Span' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Get a span by its ID. + description: Get a span by its ID. + parameters: + - name: trace_id + in: path + description: >- + The ID of the trace to get the span from. + required: true + schema: + type: string + - name: span_id + in: path + description: The ID of the span to get. + required: true + schema: + type: string + deprecated: false +jsonSchemaDialect: >- + https://json-schema.org/draft/2020-12/schema +components: + schemas: + Error: + type: object + properties: + status: + type: integer + description: HTTP status code + title: + type: string + description: >- + Error title, a short summary of the error which is invariant for an error + type + detail: + type: string + description: >- + Error detail, a longer human-readable description of the error + instance: + type: string + description: >- + (Optional) A URL which can be used to retrieve more information about + the specific occurrence of the error + additionalProperties: false + required: + - status + - title + - detail + title: Error + description: >- + Error response from the API. Roughly follows RFC 7807. + AppendRowsRequest: + type: object + properties: + rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The rows to append to the dataset. + additionalProperties: false + required: + - rows + title: AppendRowsRequest + PaginatedResponse: + type: object + properties: + data: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The list of items for the current page + has_more: + type: boolean + description: >- + Whether there are more items available after this set + url: + type: string + description: The URL for accessing this list + additionalProperties: false + required: + - data + - has_more + title: PaginatedResponse + description: >- + A generic paginated response that follows a simple format. 
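The AppendRowsRequest and PaginatedResponse schemas above pair with the /v1beta/datasetio/append-rows/{dataset_id} and /v1beta/datasetio/iterrows/{dataset_id} endpoints defined earlier in this spec. A minimal sketch of appending rows and then paging through them; the host is the spec's placeholder server URL, the dataset ID is invented, and the row shape mirrors the "post-training/messages" example used in the schema descriptions.

import requests

BASE_URL = "http://any-hosted-llama-stack.com"  # placeholder host from the spec's servers entry
DATASET_ID = "my-dataset"                        # invented identifier

# Append rows: AppendRowsRequest only requires "rows".
rows = [{"messages": [{"role": "user", "content": "Hello, world!"},
                      {"role": "assistant", "content": "Hello, world!"}]}]
resp = requests.post(f"{BASE_URL}/v1beta/datasetio/append-rows/{DATASET_ID}",
                     json={"rows": rows})
resp.raise_for_status()

# Page through the dataset using the offset-based pagination described above.
start_index = 0
while True:
    page = requests.get(f"{BASE_URL}/v1beta/datasetio/iterrows/{DATASET_ID}",
                        params={"start_index": start_index, "limit": 100})
    page.raise_for_status()
    body = page.json()               # PaginatedResponse
    for row in body["data"]:
        print(row)
    if not body["has_more"]:
        break
    start_index += len(body["data"])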
+ Dataset: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: dataset + default: dataset + description: >- + Type of resource, always 'dataset' for datasets + purpose: + type: string + enum: + - post-training/messages + - eval/question-answer + - eval/messages-answer + description: >- + Purpose of the dataset indicating its intended use + source: + oneOf: + - $ref: '#/components/schemas/URIDataSource' + - $ref: '#/components/schemas/RowsDataSource' + discriminator: + propertyName: type + mapping: + uri: '#/components/schemas/URIDataSource' + rows: '#/components/schemas/RowsDataSource' + description: >- + Data source configuration for the dataset + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Additional metadata for the dataset + additionalProperties: false + required: + - identifier + - provider_id + - type + - purpose + - source + - metadata + title: Dataset + description: >- + Dataset resource for storing and accessing training or evaluation data. + RowsDataSource: + type: object + properties: + type: + type: string + const: rows + default: rows + rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The dataset is stored in rows. E.g. - [ {"messages": [{"role": "user", + "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, + world!"}]} ] + additionalProperties: false + required: + - type + - rows + title: RowsDataSource + description: A dataset stored in rows. + URIDataSource: + type: object + properties: + type: + type: string + const: uri + default: uri + uri: + type: string + description: >- + The dataset can be obtained from a URI. E.g. - "https://mywebsite.com/mydata.jsonl" + - "lsfs://mydata.jsonl" - "data:csv;base64,{base64_content}" + additionalProperties: false + required: + - type + - uri + title: URIDataSource + description: >- + A dataset that can be obtained from a URI. + ListDatasetsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Dataset' + description: List of datasets + additionalProperties: false + required: + - data + title: ListDatasetsResponse + description: Response from listing datasets. + DataSource: + oneOf: + - $ref: '#/components/schemas/URIDataSource' + - $ref: '#/components/schemas/RowsDataSource' + discriminator: + propertyName: type + mapping: + uri: '#/components/schemas/URIDataSource' + rows: '#/components/schemas/RowsDataSource' + RegisterDatasetRequest: + type: object + properties: + purpose: + type: string + enum: + - post-training/messages + - eval/question-answer + - eval/messages-answer + description: >- + The purpose of the dataset. One of: - "post-training/messages": The dataset + contains a messages column with list of messages for post-training. { + "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": "assistant", + "content": "Hello, world!"}, ] } - "eval/question-answer": The dataset + contains a question column and an answer column for evaluation. 
{ "question": + "What is the capital of France?", "answer": "Paris" } - "eval/messages-answer": + The dataset contains a messages column with list of messages and an answer + column for evaluation. { "messages": [ {"role": "user", "content": "Hello, + my name is John Doe."}, {"role": "assistant", "content": "Hello, John + Doe. How can I help you today?"}, {"role": "user", "content": "What's + my name?"}, ], "answer": "John Doe" } + source: + $ref: '#/components/schemas/DataSource' + description: >- + The data source of the dataset. Ensure that the data source schema is + compatible with the purpose of the dataset. Examples: - { "type": "uri", + "uri": "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri": + "lsfs://mydata.jsonl" } - { "type": "uri", "uri": "data:csv;base64,{base64_content}" + } - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" + } - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content": + "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}, ] + } ] } + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The metadata for the dataset. - E.g. {"description": "My dataset"}. + dataset_id: + type: string + description: >- + The ID of the dataset. If not provided, an ID will be generated. + additionalProperties: false + required: + - purpose + - source + title: RegisterDatasetRequest + AgentConfig: + type: object + properties: + sampling_params: + $ref: '#/components/schemas/SamplingParams' + input_shields: + type: array + items: + type: string + output_shields: + type: array + items: + type: string + toolgroups: + type: array + items: + $ref: '#/components/schemas/AgentTool' + client_tools: + type: array + items: + $ref: '#/components/schemas/ToolDef' + tool_choice: + type: string + enum: + - auto + - required + - none + title: ToolChoice + description: >- + Whether tool use is required or automatic. This is a hint to the model + which may not be followed. It depends on the Instruction Following capabilities + of the model. + deprecated: true + tool_prompt_format: + type: string + enum: + - json + - function_tag + - python_list + title: ToolPromptFormat + description: >- + Prompt format for calling custom / zero shot tools. + deprecated: true + tool_config: + $ref: '#/components/schemas/ToolConfig' + max_infer_iters: + type: integer + default: 10 + model: + type: string + description: >- + The model identifier to use for the agent + instructions: + type: string + description: The system instructions for the agent + name: + type: string + description: >- + Optional name for the agent, used in telemetry and identification + enable_session_persistence: + type: boolean + default: false + description: >- + Optional flag indicating whether session data has to be persisted + response_format: + $ref: '#/components/schemas/ResponseFormat' + description: Optional response format configuration + additionalProperties: false + required: + - model + - instructions + title: AgentConfig + description: Configuration for an agent. 
+ AgentTool: + oneOf: + - type: string + - type: object + properties: + name: + type: string + args: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - name + - args + title: AgentToolGroupWithArgs + GrammarResponseFormat: + type: object + properties: + type: + type: string + enum: + - json_schema + - grammar + description: >- + Must be "grammar" to identify this format type + const: grammar + default: grammar + bnf: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The BNF grammar specification the response should conform to + additionalProperties: false + required: + - type + - bnf + title: GrammarResponseFormat + description: >- + Configuration for grammar-guided response generation. + GreedySamplingStrategy: + type: object + properties: + type: + type: string + const: greedy + default: greedy + description: >- + Must be "greedy" to identify this sampling strategy + additionalProperties: false + required: + - type + title: GreedySamplingStrategy + description: >- + Greedy sampling strategy that selects the highest probability token at each + step. + JsonSchemaResponseFormat: + type: object + properties: + type: + type: string + enum: + - json_schema + - grammar + description: >- + Must be "json_schema" to identify this format type + const: json_schema + default: json_schema + json_schema: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The JSON schema the response should conform to. In a Python SDK, this + is often a `pydantic` model. + additionalProperties: false + required: + - type + - json_schema + title: JsonSchemaResponseFormat + description: >- + Configuration for JSON schema-guided response generation. + ResponseFormat: + oneOf: + - $ref: '#/components/schemas/JsonSchemaResponseFormat' + - $ref: '#/components/schemas/GrammarResponseFormat' + discriminator: + propertyName: type + mapping: + json_schema: '#/components/schemas/JsonSchemaResponseFormat' + grammar: '#/components/schemas/GrammarResponseFormat' + SamplingParams: + type: object + properties: + strategy: + oneOf: + - $ref: '#/components/schemas/GreedySamplingStrategy' + - $ref: '#/components/schemas/TopPSamplingStrategy' + - $ref: '#/components/schemas/TopKSamplingStrategy' + discriminator: + propertyName: type + mapping: + greedy: '#/components/schemas/GreedySamplingStrategy' + top_p: '#/components/schemas/TopPSamplingStrategy' + top_k: '#/components/schemas/TopKSamplingStrategy' + description: The sampling strategy. + max_tokens: + type: integer + default: 0 + description: >- + The maximum number of tokens that can be generated in the completion. + The token count of your prompt plus max_tokens cannot exceed the model's + context length. + repetition_penalty: + type: number + default: 1.0 + description: >- + Number between -2.0 and 2.0. Positive values penalize new tokens based + on whether they appear in the text so far, increasing the model's likelihood + to talk about new topics. + stop: + type: array + items: + type: string + description: >- + Up to 4 sequences where the API will stop generating further tokens. The + returned text will not contain the stop sequence. 
+ additionalProperties: false + required: + - strategy + title: SamplingParams + description: Sampling parameters. + ToolConfig: + type: object + properties: + tool_choice: + oneOf: + - type: string + enum: + - auto + - required + - none + title: ToolChoice + description: >- + Whether tool use is required or automatic. This is a hint to the model + which may not be followed. It depends on the Instruction Following + capabilities of the model. + - type: string + default: auto + description: >- + (Optional) Whether tool use is automatic, required, or none. Can also + specify a tool name to use a specific tool. Defaults to ToolChoice.auto. + tool_prompt_format: + type: string + enum: + - json + - function_tag + - python_list + description: >- + (Optional) Instructs the model how to format tool calls. By default, Llama + Stack will attempt to use a format that is best adapted to the model. + - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. + - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a + tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python + syntax -- a list of function calls. + system_message_behavior: + type: string + enum: + - append + - replace + description: >- + (Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: + Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: + Replaces the default system prompt with the provided system message. The + system message can include the string '{{function_definitions}}' to indicate + where the function definitions should be inserted. + default: append + additionalProperties: false + title: ToolConfig + description: Configuration for tool use. + ToolDef: + type: object + properties: + name: + type: string + description: Name of the tool + description: + type: string + description: >- + (Optional) Human-readable description of what the tool does + parameters: + type: array + items: + $ref: '#/components/schemas/ToolParameter' + description: >- + (Optional) List of parameters this tool accepts + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata about the tool + additionalProperties: false + required: + - name + title: ToolDef + description: >- + Tool definition used in runtime contexts. + ToolParameter: + type: object + properties: + name: + type: string + description: Name of the parameter + parameter_type: + type: string + description: >- + Type of the parameter (e.g., string, integer) + description: + type: string + description: >- + Human-readable description of what the parameter does + required: + type: boolean + default: true + description: >- + Whether this parameter is required for tool invocation + items: + type: object + description: >- + Type of the elements when parameter_type is array + title: + type: string + description: (Optional) Title of the parameter + default: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Default value for the parameter if not provided + additionalProperties: false + required: + - name + - parameter_type + - description + - required + title: ToolParameter + description: Parameter definition for a tool. 
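+ # Illustrative example only (the tool and parameter names are hypothetical):
+ # a ToolDef for a client-side "get_weather" tool could look like
+ #   name: get_weather
+ #   description: "Look up the current weather for a city."
+ #   parameters:
+ #     - name: city
+ #       parameter_type: string
+ #       description: "Name of the city to query."
+ #       required: true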
+ TopKSamplingStrategy: + type: object + properties: + type: + type: string + const: top_k + default: top_k + description: >- + Must be "top_k" to identify this sampling strategy + top_k: + type: integer + description: >- + Number of top tokens to consider for sampling. Must be at least 1 + additionalProperties: false + required: + - type + - top_k + title: TopKSamplingStrategy + description: >- + Top-k sampling strategy that restricts sampling to the k most likely tokens. + TopPSamplingStrategy: + type: object + properties: + type: + type: string + const: top_p + default: top_p + description: >- + Must be "top_p" to identify this sampling strategy + temperature: + type: number + description: >- + Controls randomness in sampling. Higher values increase randomness + top_p: + type: number + default: 0.95 + description: >- + Cumulative probability threshold for nucleus sampling. Defaults to 0.95 + additionalProperties: false + required: + - type + title: TopPSamplingStrategy + description: >- + Top-p (nucleus) sampling strategy that samples from the smallest set of tokens + with cumulative probability >= p. + CreateAgentRequest: + type: object + properties: + agent_config: + $ref: '#/components/schemas/AgentConfig' + description: The configuration for the agent. + additionalProperties: false + required: + - agent_config + title: CreateAgentRequest + AgentCreateResponse: + type: object + properties: + agent_id: + type: string + description: Unique identifier for the created agent + additionalProperties: false + required: + - agent_id + title: AgentCreateResponse + description: >- + Response returned when creating a new agent. + Agent: + type: object + properties: + agent_id: + type: string + description: Unique identifier for the agent + agent_config: + $ref: '#/components/schemas/AgentConfig' + description: Configuration settings for the agent + created_at: + type: string + format: date-time + description: Timestamp when the agent was created + additionalProperties: false + required: + - agent_id + - agent_config + - created_at + title: Agent + description: >- + An agent instance with configuration and metadata. + CreateAgentSessionRequest: + type: object + properties: + session_name: + type: string + description: The name of the session to create. + additionalProperties: false + required: + - session_name + title: CreateAgentSessionRequest + AgentSessionCreateResponse: + type: object + properties: + session_id: + type: string + description: >- + Unique identifier for the created session + additionalProperties: false + required: + - session_id + title: AgentSessionCreateResponse + description: >- + Response returned when creating a new agent session. + CompletionMessage: + type: object + properties: + role: + type: string + const: assistant + default: assistant + description: >- + Must be "assistant" to identify this as the model's response + content: + $ref: '#/components/schemas/InterleavedContent' + description: The content of the model's response + stop_reason: + type: string + enum: + - end_of_turn + - end_of_message + - out_of_tokens + description: >- + Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: + The model finished generating the entire response. - `StopReason.end_of_message`: + The model finished generating but generated a partial response -- usually, + a tool call. The user may call the tool and continue the conversation + with the tool's response. - `StopReason.out_of_tokens`: The model ran + out of token budget. 
+ tool_calls: + type: array + items: + $ref: '#/components/schemas/ToolCall' + description: >- + List of tool calls. Each tool call is a ToolCall object. + additionalProperties: false + required: + - role + - content + - stop_reason + title: CompletionMessage + description: >- + A message containing the model's (assistant) response in a chat conversation. + ImageContentItem: + type: object + properties: + type: + type: string + const: image + default: image + description: >- + Discriminator type of the content item. Always "image" + image: + type: object + properties: + url: + $ref: '#/components/schemas/URL' + description: >- + A URL of the image or data URL in the format of data:image/{type};base64,{data}. + Note that URL could have length limits. + data: + type: string + contentEncoding: base64 + description: base64 encoded image data as string + additionalProperties: false + description: >- + Image as a base64 encoded string or an URL + additionalProperties: false + required: + - type + - image + title: ImageContentItem + description: A image content item + InferenceStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: inference + default: inference + model_response: + $ref: '#/components/schemas/CompletionMessage' + description: The response from the LLM. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + - model_response + title: InferenceStep + description: An inference step in an agent turn. + InterleavedContent: + oneOf: + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + InterleavedContentItem: + oneOf: + - $ref: '#/components/schemas/ImageContentItem' + - $ref: '#/components/schemas/TextContentItem' + discriminator: + propertyName: type + mapping: + image: '#/components/schemas/ImageContentItem' + text: '#/components/schemas/TextContentItem' + MemoryRetrievalStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: memory_retrieval + default: memory_retrieval + vector_db_ids: + type: string + description: >- + The IDs of the vector databases to retrieve context from. + inserted_context: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The context retrieved from the vector databases. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + - vector_db_ids + - inserted_context + title: MemoryRetrievalStep + description: >- + A memory retrieval step in an agent turn. 
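+ # Illustrative example only (the image URL is a placeholder): InterleavedContent accepts
+ # a bare string or a list of content items, so a mixed text-plus-image value could look like
+ #   - type: text
+ #     text: "What is shown in this image?"
+ #   - type: image
+ #     image:
+ #       url:
+ #         uri: "https://example.com/photo.png"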
+ SafetyViolation: + type: object + properties: + violation_level: + $ref: '#/components/schemas/ViolationLevel' + description: Severity level of the violation + user_message: + type: string + description: >- + (Optional) Message to convey to the user about the violation + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Additional metadata including specific violation codes for debugging and + telemetry + additionalProperties: false + required: + - violation_level + - metadata + title: SafetyViolation + description: >- + Details of a safety violation detected by content moderation. + Session: + type: object + properties: + session_id: + type: string + description: >- + Unique identifier for the conversation session + session_name: + type: string + description: Human-readable name for the session + turns: + type: array + items: + $ref: '#/components/schemas/Turn' + description: >- + List of all turns that have occurred in this session + started_at: + type: string + format: date-time + description: Timestamp when the session was created + additionalProperties: false + required: + - session_id + - session_name + - turns + - started_at + title: Session + description: >- + A single session of an interaction with an Agentic System. + ShieldCallStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. + started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: shield_call + default: shield_call + violation: + $ref: '#/components/schemas/SafetyViolation' + description: The violation from the shield call. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + title: ShieldCallStep + description: A shield call step in an agent turn. + TextContentItem: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Discriminator type of the content item. Always "text" + text: + type: string + description: Text content + additionalProperties: false + required: + - type + - text + title: TextContentItem + description: A text content item + ToolCall: + type: object + properties: + call_id: + type: string + tool_name: + oneOf: + - type: string + enum: + - brave_search + - wolfram_alpha + - photogen + - code_interpreter + title: BuiltinTool + - type: string + arguments: + oneOf: + - type: string + - type: object + additionalProperties: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + - type: array + items: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + - type: object + additionalProperties: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + arguments_json: + type: string + additionalProperties: false + required: + - call_id + - tool_name + - arguments + title: ToolCall + ToolExecutionStep: + type: object + properties: + turn_id: + type: string + description: The ID of the turn. + step_id: + type: string + description: The ID of the step. 
+ started_at: + type: string + format: date-time + description: The time the step started. + completed_at: + type: string + format: date-time + description: The time the step completed. + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + title: StepType + description: Type of the step in an agent turn. + const: tool_execution + default: tool_execution + tool_calls: + type: array + items: + $ref: '#/components/schemas/ToolCall' + description: The tool calls to execute. + tool_responses: + type: array + items: + $ref: '#/components/schemas/ToolResponse' + description: The tool responses from the tool calls. + additionalProperties: false + required: + - turn_id + - step_id + - step_type + - tool_calls + - tool_responses + title: ToolExecutionStep + description: A tool execution step in an agent turn. + ToolResponse: + type: object + properties: + call_id: + type: string + description: >- + Unique identifier for the tool call this response is for + tool_name: + oneOf: + - type: string + enum: + - brave_search + - wolfram_alpha + - photogen + - code_interpreter + title: BuiltinTool + - type: string + description: Name of the tool that was invoked + content: + $ref: '#/components/schemas/InterleavedContent' + description: The response content from the tool + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata about the tool response + additionalProperties: false + required: + - call_id + - tool_name + - content + title: ToolResponse + description: Response from a tool invocation. + ToolResponseMessage: + type: object + properties: + role: + type: string + const: tool + default: tool + description: >- + Must be "tool" to identify this as a tool response + call_id: + type: string + description: >- + Unique identifier for the tool call this response is for + content: + $ref: '#/components/schemas/InterleavedContent' + description: The response content from the tool + additionalProperties: false + required: + - role + - call_id + - content + title: ToolResponseMessage + description: >- + A message representing the result of a tool invocation. 
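+ # Illustrative example only (the call_id and tool name are placeholders; the two top-level
+ # keys are labels for this sketch, not schema fields): a ToolCall and the ToolResponseMessage
+ # that answers it could look like
+ #   tool_call:
+ #     call_id: "call_123"
+ #     tool_name: "get_weather"
+ #     arguments: '{"city": "Paris"}'
+ #   tool_response_message:
+ #     role: tool
+ #     call_id: "call_123"
+ #     content: "Sunny, 22 degrees"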
+ Turn: + type: object + properties: + turn_id: + type: string + description: >- + Unique identifier for the turn within a session + session_id: + type: string + description: >- + Unique identifier for the conversation session + input_messages: + type: array + items: + oneOf: + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/ToolResponseMessage' + description: >- + List of messages that initiated this turn + steps: + type: array + items: + oneOf: + - $ref: '#/components/schemas/InferenceStep' + - $ref: '#/components/schemas/ToolExecutionStep' + - $ref: '#/components/schemas/ShieldCallStep' + - $ref: '#/components/schemas/MemoryRetrievalStep' + discriminator: + propertyName: step_type + mapping: + inference: '#/components/schemas/InferenceStep' + tool_execution: '#/components/schemas/ToolExecutionStep' + shield_call: '#/components/schemas/ShieldCallStep' + memory_retrieval: '#/components/schemas/MemoryRetrievalStep' + description: >- + Ordered list of processing steps executed during this turn + output_message: + $ref: '#/components/schemas/CompletionMessage' + description: >- + The model's generated response containing content and metadata + output_attachments: + type: array + items: + type: object + properties: + content: + oneOf: + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + - $ref: '#/components/schemas/URL' + description: The content of the attachment. + mime_type: + type: string + description: The MIME type of the attachment. + additionalProperties: false + required: + - content + - mime_type + title: Attachment + description: An attachment to an agent turn. + description: >- + (Optional) Files or media attached to the agent's response + started_at: + type: string + format: date-time + description: Timestamp when the turn began + completed_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the turn finished, if completed + additionalProperties: false + required: + - turn_id + - session_id + - input_messages + - steps + - output_message + - started_at + title: Turn + description: >- + A single turn in an interaction with an Agentic System. + URL: + type: object + properties: + uri: + type: string + description: The URL string pointing to the resource + additionalProperties: false + required: + - uri + title: URL + description: A URL reference to external content. + UserMessage: + type: object + properties: + role: + type: string + const: user + default: user + description: >- + Must be "user" to identify this as a user message + content: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the message, which can include text and other media + context: + $ref: '#/components/schemas/InterleavedContent' + description: >- + (Optional) This field is used internally by Llama Stack to pass RAG context. + This field may be removed in the API in the future. + additionalProperties: false + required: + - role + - content + title: UserMessage + description: >- + A message from the user in a chat conversation. + ViolationLevel: + type: string + enum: + - info + - warn + - error + title: ViolationLevel + description: Severity level of a safety violation. + CreateAgentTurnRequest: + type: object + properties: + messages: + type: array + items: + oneOf: + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/ToolResponseMessage' + description: List of messages to start the turn with. 
+ stream: + type: boolean + description: >- + (Optional) If True, generate an SSE event stream of the response. Defaults + to False. + documents: + type: array + items: + type: object + properties: + content: + oneOf: + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + - $ref: '#/components/schemas/URL' + description: The content of the document. + mime_type: + type: string + description: The MIME type of the document. + additionalProperties: false + required: + - content + - mime_type + title: Document + description: A document to be used by an agent. + description: >- + (Optional) List of documents to create the turn with. + toolgroups: + type: array + items: + $ref: '#/components/schemas/AgentTool' + description: >- + (Optional) List of toolgroups to create the turn with, will be used in + addition to the agent's config toolgroups for the request. + tool_config: + $ref: '#/components/schemas/ToolConfig' + description: >- + (Optional) The tool configuration to create the turn with, will be used + to override the agent's tool_config. + additionalProperties: false + required: + - messages + title: CreateAgentTurnRequest + AgentTurnResponseEvent: + type: object + properties: + payload: + oneOf: + - $ref: '#/components/schemas/AgentTurnResponseStepStartPayload' + - $ref: '#/components/schemas/AgentTurnResponseStepProgressPayload' + - $ref: '#/components/schemas/AgentTurnResponseStepCompletePayload' + - $ref: '#/components/schemas/AgentTurnResponseTurnStartPayload' + - $ref: '#/components/schemas/AgentTurnResponseTurnCompletePayload' + - $ref: '#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload' + discriminator: + propertyName: event_type + mapping: + step_start: '#/components/schemas/AgentTurnResponseStepStartPayload' + step_progress: '#/components/schemas/AgentTurnResponseStepProgressPayload' + step_complete: '#/components/schemas/AgentTurnResponseStepCompletePayload' + turn_start: '#/components/schemas/AgentTurnResponseTurnStartPayload' + turn_complete: '#/components/schemas/AgentTurnResponseTurnCompletePayload' + turn_awaiting_input: '#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload' + description: >- + Event-specific payload containing event data + additionalProperties: false + required: + - payload + title: AgentTurnResponseEvent + description: >- + An event in an agent turn response stream. 
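+ # Illustrative example only (the step_id is a placeholder): one AgentTurnResponseEvent carrying
+ # an inference step_progress payload with a text delta could look like
+ #   payload:
+ #     event_type: step_progress
+ #     step_type: inference
+ #     step_id: "step_1"
+ #     delta:
+ #       type: text
+ #       text: "Hello"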
+ AgentTurnResponseStepCompletePayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: step_complete + default: step_complete + description: Type of event being reported + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + description: Type of step being executed + step_id: + type: string + description: >- + Unique identifier for the step within a turn + step_details: + oneOf: + - $ref: '#/components/schemas/InferenceStep' + - $ref: '#/components/schemas/ToolExecutionStep' + - $ref: '#/components/schemas/ShieldCallStep' + - $ref: '#/components/schemas/MemoryRetrievalStep' + discriminator: + propertyName: step_type + mapping: + inference: '#/components/schemas/InferenceStep' + tool_execution: '#/components/schemas/ToolExecutionStep' + shield_call: '#/components/schemas/ShieldCallStep' + memory_retrieval: '#/components/schemas/MemoryRetrievalStep' + description: Complete details of the executed step + additionalProperties: false + required: + - event_type + - step_type + - step_id + - step_details + title: AgentTurnResponseStepCompletePayload + description: >- + Payload for step completion events in agent turn responses. + AgentTurnResponseStepProgressPayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: step_progress + default: step_progress + description: Type of event being reported + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + description: Type of step being executed + step_id: + type: string + description: >- + Unique identifier for the step within a turn + delta: + oneOf: + - $ref: '#/components/schemas/TextDelta' + - $ref: '#/components/schemas/ImageDelta' + - $ref: '#/components/schemas/ToolCallDelta' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/TextDelta' + image: '#/components/schemas/ImageDelta' + tool_call: '#/components/schemas/ToolCallDelta' + description: >- + Incremental content changes during step execution + additionalProperties: false + required: + - event_type + - step_type + - step_id + - delta + title: AgentTurnResponseStepProgressPayload + description: >- + Payload for step progress events in agent turn responses. + AgentTurnResponseStepStartPayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: step_start + default: step_start + description: Type of event being reported + step_type: + type: string + enum: + - inference + - tool_execution + - shield_call + - memory_retrieval + description: Type of step being executed + step_id: + type: string + description: >- + Unique identifier for the step within a turn + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata for the step + additionalProperties: false + required: + - event_type + - step_type + - step_id + title: AgentTurnResponseStepStartPayload + description: >- + Payload for step start events in agent turn responses. 
+ AgentTurnResponseStreamChunk: + type: object + properties: + event: + $ref: '#/components/schemas/AgentTurnResponseEvent' + description: >- + Individual event in the agent turn response stream + additionalProperties: false + required: + - event + title: AgentTurnResponseStreamChunk + description: Streamed agent turn completion response. + "AgentTurnResponseTurnAwaitingInputPayload": + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: turn_awaiting_input + default: turn_awaiting_input + description: Type of event being reported + turn: + $ref: '#/components/schemas/Turn' + description: >- + Turn data when waiting for external tool responses + additionalProperties: false + required: + - event_type + - turn + title: >- + AgentTurnResponseTurnAwaitingInputPayload + description: >- + Payload for turn awaiting input events in agent turn responses. + AgentTurnResponseTurnCompletePayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: turn_complete + default: turn_complete + description: Type of event being reported + turn: + $ref: '#/components/schemas/Turn' + description: >- + Complete turn data including all steps and results + additionalProperties: false + required: + - event_type + - turn + title: AgentTurnResponseTurnCompletePayload + description: >- + Payload for turn completion events in agent turn responses. + AgentTurnResponseTurnStartPayload: + type: object + properties: + event_type: + type: string + enum: + - step_start + - step_complete + - step_progress + - turn_start + - turn_complete + - turn_awaiting_input + const: turn_start + default: turn_start + description: Type of event being reported + turn_id: + type: string + description: >- + Unique identifier for the turn within a session + additionalProperties: false + required: + - event_type + - turn_id + title: AgentTurnResponseTurnStartPayload + description: >- + Payload for turn start events in agent turn responses. + ImageDelta: + type: object + properties: + type: + type: string + const: image + default: image + description: >- + Discriminator type of the delta. Always "image" + image: + type: string + contentEncoding: base64 + description: The incremental image data as bytes + additionalProperties: false + required: + - type + - image + title: ImageDelta + description: >- + An image content delta for streaming responses. + TextDelta: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Discriminator type of the delta. Always "text" + text: + type: string + description: The incremental text content + additionalProperties: false + required: + - type + - text + title: TextDelta + description: >- + A text content delta for streaming responses. + ToolCallDelta: + type: object + properties: + type: + type: string + const: tool_call + default: tool_call + description: >- + Discriminator type of the delta. 
Always "tool_call" + tool_call: + oneOf: + - type: string + - $ref: '#/components/schemas/ToolCall' + description: >- + Either an in-progress tool call string or the final parsed tool call + parse_status: + type: string + enum: + - started + - in_progress + - failed + - succeeded + description: Current parsing status of the tool call + additionalProperties: false + required: + - type + - tool_call + - parse_status + title: ToolCallDelta + description: >- + A tool call content delta for streaming responses. + ResumeAgentTurnRequest: + type: object + properties: + tool_responses: + type: array + items: + $ref: '#/components/schemas/ToolResponse' + description: >- + The tool call responses to resume the turn with. + stream: + type: boolean + description: Whether to stream the response. + additionalProperties: false + required: + - tool_responses + title: ResumeAgentTurnRequest + AgentStepResponse: + type: object + properties: + step: + oneOf: + - $ref: '#/components/schemas/InferenceStep' + - $ref: '#/components/schemas/ToolExecutionStep' + - $ref: '#/components/schemas/ShieldCallStep' + - $ref: '#/components/schemas/MemoryRetrievalStep' + discriminator: + propertyName: step_type + mapping: + inference: '#/components/schemas/InferenceStep' + tool_execution: '#/components/schemas/ToolExecutionStep' + shield_call: '#/components/schemas/ShieldCallStep' + memory_retrieval: '#/components/schemas/MemoryRetrievalStep' + description: >- + The complete step data and execution details + additionalProperties: false + required: + - step + title: AgentStepResponse + description: >- + Response containing details of a specific agent step. + Benchmark: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: benchmark + default: benchmark + description: The resource type, always benchmark + dataset_id: + type: string + description: >- + Identifier of the dataset to use for the benchmark evaluation + scoring_functions: + type: array + items: + type: string + description: >- + List of scoring function identifiers to apply during evaluation + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Metadata for this evaluation task + additionalProperties: false + required: + - identifier + - provider_id + - type + - dataset_id + - scoring_functions + - metadata + title: Benchmark + description: >- + A benchmark resource for evaluating model performance. + ListBenchmarksResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Benchmark' + additionalProperties: false + required: + - data + title: ListBenchmarksResponse + RegisterBenchmarkRequest: + type: object + properties: + benchmark_id: + type: string + description: The ID of the benchmark to register. + dataset_id: + type: string + description: >- + The ID of the dataset to use for the benchmark. + scoring_functions: + type: array + items: + type: string + description: >- + The scoring functions to use for the benchmark. + provider_benchmark_id: + type: string + description: >- + The ID of the provider benchmark to use for the benchmark. + provider_id: + type: string + description: >- + The ID of the provider to use for the benchmark. 
+ metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The metadata to use for the benchmark. + additionalProperties: false + required: + - benchmark_id + - dataset_id + - scoring_functions + title: RegisterBenchmarkRequest + AgentCandidate: + type: object + properties: + type: + type: string + const: agent + default: agent + config: + $ref: '#/components/schemas/AgentConfig' + description: >- + The configuration for the agent candidate. + additionalProperties: false + required: + - type + - config + title: AgentCandidate + description: An agent candidate for evaluation. + AggregationFunctionType: + type: string + enum: + - average + - weighted_average + - median + - categorical_count + - accuracy + title: AggregationFunctionType + description: >- + Types of aggregation functions for scoring results. + BasicScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: basic + default: basic + description: >- + The type of scoring function parameters, always basic + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - aggregation_functions + title: BasicScoringFnParams + description: >- + Parameters for basic scoring function configuration. + BenchmarkConfig: + type: object + properties: + eval_candidate: + oneOf: + - $ref: '#/components/schemas/ModelCandidate' + - $ref: '#/components/schemas/AgentCandidate' + discriminator: + propertyName: type + mapping: + model: '#/components/schemas/ModelCandidate' + agent: '#/components/schemas/AgentCandidate' + description: The candidate to evaluate. + scoring_params: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringFnParams' + description: >- + Map between scoring function id and parameters for each scoring function + you want to run + num_examples: + type: integer + description: >- + (Optional) The number of examples to evaluate. If not provided, all examples + in the dataset will be evaluated + additionalProperties: false + required: + - eval_candidate + - scoring_params + title: BenchmarkConfig + description: >- + A benchmark configuration for evaluation. + LLMAsJudgeScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: llm_as_judge + default: llm_as_judge + description: >- + The type of scoring function parameters, always llm_as_judge + judge_model: + type: string + description: >- + Identifier of the LLM model to use as a judge for scoring + prompt_template: + type: string + description: >- + (Optional) Custom prompt template for the judge model + judge_score_regexes: + type: array + items: + type: string + description: >- + Regexes to extract the answer from generated response + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - judge_model + - judge_score_regexes + - aggregation_functions + title: LLMAsJudgeScoringFnParams + description: >- + Parameters for LLM-as-judge scoring function configuration. 
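+ # Illustrative example only (the judge model, prompt template, and regex are hypothetical):
+ # an LLMAsJudgeScoringFnParams configuration could look like
+ #   type: llm_as_judge
+ #   judge_model: "meta-llama/Llama-3.1-70B-Instruct"
+ #   prompt_template: "Rate the answer from 0 to 10. Answer: {answer}"
+ #   judge_score_regexes:
+ #     - 'Score: (\d+)'
+ #   aggregation_functions:
+ #     - average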
+ ModelCandidate: + type: object + properties: + type: + type: string + const: model + default: model + model: + type: string + description: The model ID to evaluate. + sampling_params: + $ref: '#/components/schemas/SamplingParams' + description: The sampling parameters for the model. + system_message: + $ref: '#/components/schemas/SystemMessage' + description: >- + (Optional) The system message providing instructions or context to the + model. + additionalProperties: false + required: + - type + - model + - sampling_params + title: ModelCandidate + description: A model candidate for evaluation. + RegexParserScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: regex_parser + default: regex_parser + description: >- + The type of scoring function parameters, always regex_parser + parsing_regexes: + type: array + items: + type: string + description: >- + Regex to extract the answer from generated response + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - parsing_regexes + - aggregation_functions + title: RegexParserScoringFnParams + description: >- + Parameters for regex parser scoring function configuration. + ScoringFnParams: + oneOf: + - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' + - $ref: '#/components/schemas/RegexParserScoringFnParams' + - $ref: '#/components/schemas/BasicScoringFnParams' + discriminator: + propertyName: type + mapping: + llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' + regex_parser: '#/components/schemas/RegexParserScoringFnParams' + basic: '#/components/schemas/BasicScoringFnParams' + ScoringFnParamsType: + type: string + enum: + - llm_as_judge + - regex_parser + - basic + title: ScoringFnParamsType + description: >- + Types of scoring function parameter configurations. + SystemMessage: + type: object + properties: + role: + type: string + const: system + default: system + description: >- + Must be "system" to identify this as a system message + content: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the "system prompt". If multiple system messages are provided, + they are concatenated. The underlying Llama Stack code may also add other + system messages (for example, for formatting tool definitions). + additionalProperties: false + required: + - role + - content + title: SystemMessage + description: >- + A system message providing instructions or context to the model. + EvaluateRowsRequest: + type: object + properties: + input_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The rows to evaluate. + scoring_functions: + type: array + items: + type: string + description: >- + The scoring functions to use for the evaluation. + benchmark_config: + $ref: '#/components/schemas/BenchmarkConfig' + description: The configuration for the benchmark. 
+ additionalProperties: false + required: + - input_rows + - scoring_functions + - benchmark_config + title: EvaluateRowsRequest + EvaluateResponse: + type: object + properties: + generations: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The generations from the evaluation. + scores: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringResult' + description: The scores from the evaluation. + additionalProperties: false + required: + - generations + - scores + title: EvaluateResponse + description: The response from an evaluation. + ScoringResult: + type: object + properties: + score_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The scoring result for each row. Each row is a map of column name to value. + aggregated_results: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Map of metric name to aggregated value + additionalProperties: false + required: + - score_rows + - aggregated_results + title: ScoringResult + description: A scoring result for a single row. + RunEvalRequest: + type: object + properties: + benchmark_config: + $ref: '#/components/schemas/BenchmarkConfig' + description: The configuration for the benchmark. + additionalProperties: false + required: + - benchmark_config + title: RunEvalRequest + Job: + type: object + properties: + job_id: + type: string + description: Unique identifier for the job + status: + type: string + enum: + - completed + - in_progress + - failed + - scheduled + - cancelled + description: Current execution status of the job + additionalProperties: false + required: + - job_id + - status + title: Job + description: >- + A job execution instance with status tracking. + "OpenAIChatCompletionContentPartImageParam": + type: object + properties: + type: + type: string + const: image_url + default: image_url + description: >- + Must be "image_url" to identify this as image content + image_url: + $ref: '#/components/schemas/OpenAIImageURL' + description: >- + Image URL specification and processing details + additionalProperties: false + required: + - type + - image_url + title: >- + OpenAIChatCompletionContentPartImageParam + description: >- + Image content part for OpenAI-compatible chat completion messages. + OpenAIChatCompletionContentPartTextParam: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Must be "text" to identify this as text content + text: + type: string + description: The text content of the message + additionalProperties: false + required: + - type + - text + title: OpenAIChatCompletionContentPartTextParam + description: >- + Text content part for OpenAI-compatible chat completion messages. + OpenAIImageURL: + type: object + properties: + url: + type: string + description: >- + URL of the image to include in the message + detail: + type: string + description: >- + (Optional) Level of detail for image processing. Can be "low", "high", + or "auto" + additionalProperties: false + required: + - url + title: OpenAIImageURL + description: >- + Image URL specification for OpenAI-compatible chat completion messages. 
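+ # Illustrative example only (the image URL is a placeholder): OpenAI-compatible content parts
+ # as they could appear in a message content array
+ #   - type: text
+ #     text: "Describe this picture."
+ #   - type: image_url
+ #     image_url:
+ #       url: "https://example.com/cat.png"
+ #       detail: "low"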
+ RerankRequest: + type: object + properties: + model: + type: string + description: >- + The identifier of the reranking model to use. + query: + oneOf: + - type: string + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + description: >- + The search query to rank items against. Can be a string, text content + part, or image content part. The input must not exceed the model's max + input token length. + items: + type: array + items: + oneOf: + - type: string + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + description: >- + List of items to rerank. Each item can be a string, text content part, + or image content part. Each input must not exceed the model's max input + token length. + max_num_results: + type: integer + description: >- + (Optional) Maximum number of results to return. Default: returns all. + additionalProperties: false + required: + - model + - query + - items + title: RerankRequest + RerankData: + type: object + properties: + index: + type: integer + description: >- + The original index of the document in the input list + relevance_score: + type: number + description: >- + The relevance score from the model output. Values are inverted when applicable + so that higher scores indicate greater relevance. + additionalProperties: false + required: + - index + - relevance_score + title: RerankData + description: >- + A single rerank result from a reranking response. + RerankResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/RerankData' + description: >- + List of rerank result objects, sorted by relevance score (descending) + additionalProperties: false + required: + - data + title: RerankResponse + description: Response from a reranking request. + Checkpoint: + type: object + properties: + identifier: + type: string + description: Unique identifier for the checkpoint + created_at: + type: string + format: date-time + description: >- + Timestamp when the checkpoint was created + epoch: + type: integer + description: >- + Training epoch when the checkpoint was saved + post_training_job_id: + type: string + description: >- + Identifier of the training job that created this checkpoint + path: + type: string + description: >- + File system path where the checkpoint is stored + training_metrics: + $ref: '#/components/schemas/PostTrainingMetric' + description: >- + (Optional) Training metrics associated with this checkpoint + additionalProperties: false + required: + - identifier + - created_at + - epoch + - post_training_job_id + - path + title: Checkpoint + description: Checkpoint created during training runs. + PostTrainingJobArtifactsResponse: + type: object + properties: + job_uuid: + type: string + description: Unique identifier for the training job + checkpoints: + type: array + items: + $ref: '#/components/schemas/Checkpoint' + description: >- + List of model checkpoints created during training + additionalProperties: false + required: + - job_uuid + - checkpoints + title: PostTrainingJobArtifactsResponse + description: Artifacts of a finetuning job. 
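+ # Illustrative example only (identifier, job id, path, and timestamp are hypothetical):
+ # a Checkpoint entry inside a PostTrainingJobArtifactsResponse could look like
+ #   identifier: "Llama-3.1-8B-Instruct-sft-epoch-1"
+ #   created_at: "2025-03-01T12:00:00Z"
+ #   epoch: 1
+ #   post_training_job_id: "job-1234"
+ #   path: "/checkpoints/job-1234/epoch-1"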
+ PostTrainingMetric: + type: object + properties: + epoch: + type: integer + description: Training epoch number + train_loss: + type: number + description: Loss value on the training dataset + validation_loss: + type: number + description: Loss value on the validation dataset + perplexity: + type: number + description: >- + Perplexity metric indicating model confidence + additionalProperties: false + required: + - epoch + - train_loss + - validation_loss + - perplexity + title: PostTrainingMetric + description: >- + Training metrics captured during post-training jobs. + CancelTrainingJobRequest: + type: object + properties: + job_uuid: + type: string + description: The UUID of the job to cancel. + additionalProperties: false + required: + - job_uuid + title: CancelTrainingJobRequest + PostTrainingJobStatusResponse: + type: object + properties: + job_uuid: + type: string + description: Unique identifier for the training job + status: + type: string + enum: + - completed + - in_progress + - failed + - scheduled + - cancelled + description: Current status of the training job + scheduled_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the job was scheduled + started_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the job execution began + completed_at: + type: string + format: date-time + description: >- + (Optional) Timestamp when the job finished, if completed + resources_allocated: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Information about computational resources allocated to the + job + checkpoints: + type: array + items: + $ref: '#/components/schemas/Checkpoint' + description: >- + List of model checkpoints created during training + additionalProperties: false + required: + - job_uuid + - status + - checkpoints + title: PostTrainingJobStatusResponse + description: Status of a finetuning job. + ListPostTrainingJobsResponse: + type: object + properties: + data: + type: array + items: + type: object + properties: + job_uuid: + type: string + additionalProperties: false + required: + - job_uuid + title: PostTrainingJob + additionalProperties: false + required: + - data + title: ListPostTrainingJobsResponse + DPOAlignmentConfig: + type: object + properties: + beta: + type: number + description: Temperature parameter for the DPO loss + loss_type: + $ref: '#/components/schemas/DPOLossType' + default: sigmoid + description: The type of loss function to use for DPO + additionalProperties: false + required: + - beta + - loss_type + title: DPOAlignmentConfig + description: >- + Configuration for Direct Preference Optimization (DPO) alignment. 
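+ # Illustrative example only (values chosen for illustration): a DPOAlignmentConfig could look like
+ #   beta: 0.1
+ #   loss_type: sigmoid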
+ DPOLossType: + type: string + enum: + - sigmoid + - hinge + - ipo + - kto_pair + title: DPOLossType + DataConfig: + type: object + properties: + dataset_id: + type: string + description: >- + Unique identifier for the training dataset + batch_size: + type: integer + description: Number of samples per training batch + shuffle: + type: boolean + description: >- + Whether to shuffle the dataset during training + data_format: + $ref: '#/components/schemas/DatasetFormat' + description: >- + Format of the dataset (instruct or dialog) + validation_dataset_id: + type: string + description: >- + (Optional) Unique identifier for the validation dataset + packed: + type: boolean + default: false + description: >- + (Optional) Whether to pack multiple samples into a single sequence for + efficiency + train_on_input: + type: boolean + default: false + description: >- + (Optional) Whether to compute loss on input tokens as well as output tokens + additionalProperties: false + required: + - dataset_id + - batch_size + - shuffle + - data_format + title: DataConfig + description: >- + Configuration for training data and data loading. + DatasetFormat: + type: string + enum: + - instruct + - dialog + title: DatasetFormat + description: Format of the training dataset. + EfficiencyConfig: + type: object + properties: + enable_activation_checkpointing: + type: boolean + default: false + description: >- + (Optional) Whether to use activation checkpointing to reduce memory usage + enable_activation_offloading: + type: boolean + default: false + description: >- + (Optional) Whether to offload activations to CPU to save GPU memory + memory_efficient_fsdp_wrap: + type: boolean + default: false + description: >- + (Optional) Whether to use memory-efficient FSDP wrapping + fsdp_cpu_offload: + type: boolean + default: false + description: >- + (Optional) Whether to offload FSDP parameters to CPU + additionalProperties: false + title: EfficiencyConfig + description: >- + Configuration for memory and compute efficiency optimizations. + OptimizerConfig: + type: object + properties: + optimizer_type: + $ref: '#/components/schemas/OptimizerType' + description: >- + Type of optimizer to use (adam, adamw, or sgd) + lr: + type: number + description: Learning rate for the optimizer + weight_decay: + type: number + description: >- + Weight decay coefficient for regularization + num_warmup_steps: + type: integer + description: Number of steps for learning rate warmup + additionalProperties: false + required: + - optimizer_type + - lr + - weight_decay + - num_warmup_steps + title: OptimizerConfig + description: >- + Configuration parameters for the optimization algorithm. + OptimizerType: + type: string + enum: + - adam + - adamw + - sgd + title: OptimizerType + description: >- + Available optimizer algorithms for training. 
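+ # Illustrative example only (dataset id and hyperparameters are placeholders): DataConfig and
+ # OptimizerConfig blocks as they could appear inside a TrainingConfig
+ #   data_config:
+ #     dataset_id: "my-finetune-dataset"
+ #     batch_size: 8
+ #     shuffle: true
+ #     data_format: instruct
+ #   optimizer_config:
+ #     optimizer_type: adamw
+ #     lr: 0.0001
+ #     weight_decay: 0.01
+ #     num_warmup_steps: 100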
+ TrainingConfig: + type: object + properties: + n_epochs: + type: integer + description: Number of training epochs to run + max_steps_per_epoch: + type: integer + default: 1 + description: Maximum number of steps to run per epoch + gradient_accumulation_steps: + type: integer + default: 1 + description: >- + Number of steps to accumulate gradients before updating + max_validation_steps: + type: integer + default: 1 + description: >- + (Optional) Maximum number of validation steps per epoch + data_config: + $ref: '#/components/schemas/DataConfig' + description: >- + (Optional) Configuration for data loading and formatting + optimizer_config: + $ref: '#/components/schemas/OptimizerConfig' + description: >- + (Optional) Configuration for the optimization algorithm + efficiency_config: + $ref: '#/components/schemas/EfficiencyConfig' + description: >- + (Optional) Configuration for memory and compute optimizations + dtype: + type: string + default: bf16 + description: >- + (Optional) Data type for model parameters (bf16, fp16, fp32) + additionalProperties: false + required: + - n_epochs + - max_steps_per_epoch + - gradient_accumulation_steps + title: TrainingConfig + description: >- + Comprehensive configuration for the training process. + PreferenceOptimizeRequest: + type: object + properties: + job_uuid: + type: string + description: The UUID of the job to create. + finetuned_model: + type: string + description: The model to fine-tune. + algorithm_config: + $ref: '#/components/schemas/DPOAlignmentConfig' + description: The algorithm configuration. + training_config: + $ref: '#/components/schemas/TrainingConfig' + description: The training configuration. + hyperparam_search_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The hyperparam search configuration. + logger_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The logger configuration. 
+ additionalProperties: false + required: + - job_uuid + - finetuned_model + - algorithm_config + - training_config + - hyperparam_search_config + - logger_config + title: PreferenceOptimizeRequest + PostTrainingJob: + type: object + properties: + job_uuid: + type: string + additionalProperties: false + required: + - job_uuid + title: PostTrainingJob + AlgorithmConfig: + oneOf: + - $ref: '#/components/schemas/LoraFinetuningConfig' + - $ref: '#/components/schemas/QATFinetuningConfig' + discriminator: + propertyName: type + mapping: + LoRA: '#/components/schemas/LoraFinetuningConfig' + QAT: '#/components/schemas/QATFinetuningConfig' + LoraFinetuningConfig: + type: object + properties: + type: + type: string + const: LoRA + default: LoRA + description: Algorithm type identifier, always "LoRA" + lora_attn_modules: + type: array + items: + type: string + description: >- + List of attention module names to apply LoRA to + apply_lora_to_mlp: + type: boolean + description: Whether to apply LoRA to MLP layers + apply_lora_to_output: + type: boolean + description: >- + Whether to apply LoRA to output projection layers + rank: + type: integer + description: >- + Rank of the LoRA adaptation (lower rank = fewer parameters) + alpha: + type: integer + description: >- + LoRA scaling parameter that controls adaptation strength + use_dora: + type: boolean + default: false + description: >- + (Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation) + quantize_base: + type: boolean + default: false + description: >- + (Optional) Whether to quantize the base model weights + additionalProperties: false + required: + - type + - lora_attn_modules + - apply_lora_to_mlp + - apply_lora_to_output + - rank + - alpha + title: LoraFinetuningConfig + description: >- + Configuration for Low-Rank Adaptation (LoRA) fine-tuning. + QATFinetuningConfig: + type: object + properties: + type: + type: string + const: QAT + default: QAT + description: Algorithm type identifier, always "QAT" + quantizer_name: + type: string + description: >- + Name of the quantization algorithm to use + group_size: + type: integer + description: Size of groups for grouped quantization + additionalProperties: false + required: + - type + - quantizer_name + - group_size + title: QATFinetuningConfig + description: >- + Configuration for Quantization-Aware Training (QAT) fine-tuning. + SupervisedFineTuneRequest: + type: object + properties: + job_uuid: + type: string + description: The UUID of the job to create. + training_config: + $ref: '#/components/schemas/TrainingConfig' + description: The training configuration. + hyperparam_search_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The hyperparam search configuration. + logger_config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The logger configuration. + model: + type: string + description: The model to fine-tune. + checkpoint_dir: + type: string + description: The directory to save checkpoint(s) to. + algorithm_config: + $ref: '#/components/schemas/AlgorithmConfig' + description: The algorithm configuration. 
+ additionalProperties: false + required: + - job_uuid + - training_config + - hyperparam_search_config + - logger_config + title: SupervisedFineTuneRequest + QueryMetricsRequest: + type: object + properties: + start_time: + type: integer + description: The start time of the metric to query. + end_time: + type: integer + description: The end time of the metric to query. + granularity: + type: string + description: The granularity of the metric to query. + query_type: + type: string + enum: + - range + - instant + description: The type of query to perform. + label_matchers: + type: array + items: + type: object + properties: + name: + type: string + description: The name of the label to match + value: + type: string + description: The value to match against + operator: + type: string + enum: + - '=' + - '!=' + - =~ + - '!~' + description: >- + The comparison operator to use for matching + default: '=' + additionalProperties: false + required: + - name + - value + - operator + title: MetricLabelMatcher + description: >- + A matcher for filtering metrics by label values. + description: >- + The label matchers to apply to the metric. + additionalProperties: false + required: + - start_time + - query_type + title: QueryMetricsRequest + MetricDataPoint: + type: object + properties: + timestamp: + type: integer + description: >- + Unix timestamp when the metric value was recorded + value: + type: number + description: >- + The numeric value of the metric at this timestamp + unit: + type: string + additionalProperties: false + required: + - timestamp + - value + - unit + title: MetricDataPoint + description: >- + A single data point in a metric time series. + MetricLabel: + type: object + properties: + name: + type: string + description: The name of the label + value: + type: string + description: The value of the label + additionalProperties: false + required: + - name + - value + title: MetricLabel + description: A label associated with a metric. + MetricSeries: + type: object + properties: + metric: + type: string + description: The name of the metric + labels: + type: array + items: + $ref: '#/components/schemas/MetricLabel' + description: >- + List of labels associated with this metric series + values: + type: array + items: + $ref: '#/components/schemas/MetricDataPoint' + description: >- + List of data points in chronological order + additionalProperties: false + required: + - metric + - labels + - values + title: MetricSeries + description: A time series of metric data points. + QueryMetricsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/MetricSeries' + description: >- + List of metric series matching the query criteria + additionalProperties: false + required: + - data + title: QueryMetricsResponse + description: >- + Response containing metric time series data. + QueryCondition: + type: object + properties: + key: + type: string + description: The attribute key to filter on + op: + $ref: '#/components/schemas/QueryConditionOp' + description: The comparison operator to apply + value: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The value to compare against + additionalProperties: false + required: + - key + - op + - value + title: QueryCondition + description: A condition for filtering query results. + QueryConditionOp: + type: string + enum: + - eq + - ne + - gt + - lt + title: QueryConditionOp + description: >- + Comparison operators for query conditions. 
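The `QueryMetricsRequest` schema above is easiest to read through an example body. The sketch below assembles one with a single `MetricLabelMatcher`; the label name, value, and time window are assumptions for illustration, and the telemetry path this body would be posted to is not part of this hunk.

```python
import time

# Illustrative request body conforming to QueryMetricsRequest.
# start_time/end_time are Unix timestamps (integers); query_type is "range" or "instant".
now = int(time.time())
query_metrics_request = {
    "start_time": now - 3600,        # last hour
    "end_time": now,                 # optional
    "query_type": "range",
    "label_matchers": [
        {
            # MetricLabelMatcher: operator is one of =, !=, =~, !~ (default "=")
            "name": "model_id",      # hypothetical label name
            "value": "llama-3.*",    # hypothetical value / regex
            "operator": "=~",
        }
    ],
}
```

A successful response would be a `QueryMetricsResponse`: a `data` list of `MetricSeries`, each carrying its `labels` and chronologically ordered `MetricDataPoint` values.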
+ QuerySpansRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the spans. + attributes_to_return: + type: array + items: + type: string + description: The attributes to return in the spans. + max_depth: + type: integer + description: The maximum depth of the tree. + additionalProperties: false + required: + - attribute_filters + - attributes_to_return + title: QuerySpansRequest + Span: + type: object + properties: + span_id: + type: string + description: Unique identifier for the span + trace_id: + type: string + description: >- + Unique identifier for the trace this span belongs to + parent_span_id: + type: string + description: >- + (Optional) Unique identifier for the parent span, if this is a child span + name: + type: string + description: >- + Human-readable name describing the operation this span represents + start_time: + type: string + format: date-time + description: Timestamp when the operation began + end_time: + type: string + format: date-time + description: >- + (Optional) Timestamp when the operation finished, if completed + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Key-value pairs containing additional metadata about the span + additionalProperties: false + required: + - span_id + - trace_id + - name + - start_time + title: Span + description: >- + A span representing a single operation within a trace. + QuerySpansResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Span' + description: >- + List of spans matching the query criteria + additionalProperties: false + required: + - data + title: QuerySpansResponse + description: Response containing a list of spans. + SaveSpansToDatasetRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the spans. + attributes_to_save: + type: array + items: + type: string + description: The attributes to save to the dataset. + dataset_id: + type: string + description: >- + The ID of the dataset to save the spans to. + max_depth: + type: integer + description: The maximum depth of the tree. + additionalProperties: false + required: + - attribute_filters + - attributes_to_save + - dataset_id + title: SaveSpansToDatasetRequest + GetSpanTreeRequest: + type: object + properties: + attributes_to_return: + type: array + items: + type: string + description: The attributes to return in the tree. + max_depth: + type: integer + description: The maximum depth of the tree. + additionalProperties: false + title: GetSpanTreeRequest + SpanStatus: + type: string + enum: + - ok + - error + title: SpanStatus + description: >- + The status of a span indicating whether it completed successfully or with + an error. 
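As with the metrics request, a small example makes the span-query schemas above easier to scan. The attribute keys and dataset ID below are hypothetical; the shapes follow `QueryCondition`, `QuerySpansRequest`, and `SaveSpansToDatasetRequest` as defined in this hunk.

```python
# A QueryCondition filters on an attribute key with an operator (eq | ne | gt | lt).
attribute_filters = [
    {"key": "session_id", "op": "eq", "value": "sess-42"},  # hypothetical attribute
]

# QuerySpansRequest: which spans to fetch and which attributes to return.
query_spans_request = {
    "attribute_filters": attribute_filters,
    "attributes_to_return": ["model_id", "latency_ms"],     # hypothetical attributes
    "max_depth": 3,                                          # optional
}

# SaveSpansToDatasetRequest reuses the same filters to persist selected attributes.
save_spans_request = {
    "attribute_filters": attribute_filters,
    "attributes_to_save": ["model_id", "latency_ms"],
    "dataset_id": "span-export-dataset",                     # hypothetical dataset ID
}
```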
+ SpanWithStatus: + type: object + properties: + span_id: + type: string + description: Unique identifier for the span + trace_id: + type: string + description: >- + Unique identifier for the trace this span belongs to + parent_span_id: + type: string + description: >- + (Optional) Unique identifier for the parent span, if this is a child span + name: + type: string + description: >- + Human-readable name describing the operation this span represents + start_time: + type: string + format: date-time + description: Timestamp when the operation began + end_time: + type: string + format: date-time + description: >- + (Optional) Timestamp when the operation finished, if completed + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Key-value pairs containing additional metadata about the span + status: + $ref: '#/components/schemas/SpanStatus' + description: >- + (Optional) The current status of the span + additionalProperties: false + required: + - span_id + - trace_id + - name + - start_time + title: SpanWithStatus + description: A span that includes status information. + QuerySpanTreeResponse: + type: object + properties: + data: + type: object + additionalProperties: + $ref: '#/components/schemas/SpanWithStatus' + description: >- + Dictionary mapping span IDs to spans with status information + additionalProperties: false + required: + - data + title: QuerySpanTreeResponse + description: >- + Response containing a tree structure of spans. + QueryTracesRequest: + type: object + properties: + attribute_filters: + type: array + items: + $ref: '#/components/schemas/QueryCondition' + description: >- + The attribute filters to apply to the traces. + limit: + type: integer + description: The limit of traces to return. + offset: + type: integer + description: The offset of the traces to return. + order_by: + type: array + items: + type: string + description: The order by of the traces to return. + additionalProperties: false + title: QueryTracesRequest + Trace: + type: object + properties: + trace_id: + type: string + description: Unique identifier for the trace + root_span_id: + type: string + description: >- + Unique identifier for the root span that started this trace + start_time: + type: string + format: date-time + description: Timestamp when the trace began + end_time: + type: string + format: date-time + description: >- + (Optional) Timestamp when the trace finished, if completed + additionalProperties: false + required: + - trace_id + - root_span_id + - start_time + title: Trace + description: >- + A trace representing the complete execution path of a request across multiple + operations. + QueryTracesResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Trace' + description: >- + List of traces matching the query criteria + additionalProperties: false + required: + - data + title: QueryTracesResponse + description: Response containing a list of traces. 
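Finally, a short sketch of how a client might walk a `QuerySpanTreeResponse`, whose `data` field maps span IDs to `SpanWithStatus` objects. The input dictionary is a hand-written stand-in for a real response, used only to show the parent/child relationship encoded by `parent_span_id`.

```python
from collections import defaultdict

# Hand-written stand-in for QuerySpanTreeResponse.data (span_id -> SpanWithStatus).
spans = {
    "span-root": {"span_id": "span-root", "trace_id": "t1", "name": "request",
                  "start_time": "2025-01-01T00:00:00Z", "status": "ok"},
    "span-child": {"span_id": "span-child", "trace_id": "t1", "name": "inference",
                   "start_time": "2025-01-01T00:00:01Z", "status": "ok",
                   "parent_span_id": "span-root"},
}

# Group children under their parents, then print the tree from the roots down.
children = defaultdict(list)
roots = []
for span in spans.values():
    parent = span.get("parent_span_id")
    (children[parent] if parent else roots).append(span)

def print_tree(span, depth=0):
    print("  " * depth + f"{span['name']} [{span.get('status', 'unknown')}]")
    for child in children[span["span_id"]]:
        print_tree(child, depth + 1)

for root in roots:
    print_tree(root)
```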
+ responses: + BadRequest400: + description: The request was invalid or malformed + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + status: 400 + title: Bad Request + detail: The request was invalid or malformed + TooManyRequests429: + description: >- + The client has sent too many requests in a given amount of time + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + status: 429 + title: Too Many Requests + detail: >- + You have exceeded the rate limit. Please try again later. + InternalServerError500: + description: >- + The server encountered an unexpected error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + status: 500 + title: Internal Server Error + detail: >- + An unexpected error occurred. Our team has been notified. + DefaultError: + description: An unexpected error occurred + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + example: + status: 0 + title: Error + detail: An unexpected error occurred +security: + - Default: [] +tags: + - name: Agents + description: >- + APIs for creating and interacting with agentic systems. + + + ## Agents API (Experimental) + + + > **🧪 EXPERIMENTAL**: This API is in preview and may change based on user feedback. + Great for exploring new capabilities and providing feedback to influence the + final design. + + + Main functionalities provided by this API: + + + - Create agents with specific instructions and ability to use tools. + + - Interactions with agents are grouped into sessions ("threads"), and each interaction + is called a "turn". + + - Agents can be provided with various tools (see the ToolGroups and ToolRuntime + APIs for more details). + + - Agents can be provided with various shields (see the Safety API for more details). + + - Agents can also use Memory to retrieve information from knowledge bases. See + the RAG Tool and Vector IO APIs for more details. + + + ### 🧪 Feedback Welcome + + + This API is actively being developed. We welcome feedback on: + + - API design and usability + + - Performance characteristics + + - Missing features or capabilities + + - Integration patterns + + + **Provide Feedback**: [GitHub Discussions](https://github.com/llamastack/llama-stack/discussions) + or [GitHub Issues](https://github.com/llamastack/llama-stack/issues) + x-displayName: Agents + - name: Benchmarks + description: '' + - name: DatasetIO + description: '' + - name: Datasets + description: '' + - name: Eval + description: '' + x-displayName: >- + Llama Stack Evaluation API for running evaluations on model and agent candidates. + - name: PostTraining (Coming Soon) + description: '' + - name: Telemetry + description: '' +x-tagGroups: + - name: Operations + tags: + - Agents + - Benchmarks + - DatasetIO + - Datasets + - Eval + - PostTraining (Coming Soon) + - Telemetry diff --git a/docs/static/llama-stack-spec.html b/docs/static/llama-stack-spec.html index 20f05a110..fa16e62ee 100644 --- a/docs/static/llama-stack-spec.html +++ b/docs/static/llama-stack-spec.html @@ -32,7 +32,7 @@ "info": { "title": "Llama Stack Specification", "version": "v1", - "description": "This is the specification of the Llama Stack that provides\n a set of endpoints and their corresponding interfaces that are tailored to\n best leverage Llama Models." 
+ "description": "This is the specification of the Llama Stack that provides\n a set of endpoints and their corresponding interfaces that are tailored to\n best leverage Llama Models.\n\n**✅ STABLE**: Production-ready APIs with backward compatibility guarantees." }, "servers": [ { @@ -40,136 +40,15 @@ } ], "paths": { - "/v1/datasetio/append-rows/{dataset_id}": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "DatasetIO" - ], - "summary": "Append rows to a dataset.", - "description": "Append rows to a dataset.", - "parameters": [ - { - "name": "dataset_id", - "in": "path", - "description": "The ID of the dataset to append the rows to.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AppendRowsRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/post-training/job/cancel": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Cancel a training job.", - "description": "Cancel a training job.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CancelTrainingJobRequest" - } - } - }, - "required": true - } - } - }, - "/v1/post-training/job/cancel": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Cancel a training job.", - "description": "Cancel a training job.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CancelTrainingJobRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/agents": { + "/v1/chat/completions": { "get": { "responses": { "200": { - "description": "A PaginatedResponse.", + "description": "A ListOpenAIChatCompletionResponse.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/PaginatedResponse" + "$ref": "#/components/schemas/ListOpenAIChatCompletionResponse" } } } @@ -188,39 +67,65 @@ } }, "tags": [ - "Agents" + "Inference" ], - "summary": "List all agents.", - "description": "List all agents.", + "summary": "List all chat completions.", + "description": "List all chat completions.", "parameters": [ { - "name": "start_index", + "name": "after", "in": "query", - "description": "The index to start the pagination from.", + "description": "The ID of the last chat completion to return.", "required": false, "schema": { - "type": "integer" + "type": "string" } }, { "name": "limit", "in": "query", - "description": "The number of agents to return.", 
+ "description": "The maximum number of chat completions to return.", "required": false, "schema": { "type": "integer" } + }, + { + "name": "model", + "in": "query", + "description": "The model to filter by.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "order", + "in": "query", + "description": "The order to sort the chat completions by: \"asc\" or \"desc\". Defaults to \"desc\".", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order" + } } - ] + ], + "deprecated": false }, "post": { "responses": { "200": { - "description": "An AgentCreateResponse with the agent ID.", + "description": "An OpenAIChatCompletion.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/AgentCreateResponse" + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletion" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionChunk" + } + ] } } } @@ -239,32 +144,33 @@ } }, "tags": [ - "Agents" + "Inference" ], - "summary": "Create an agent with the given configuration.", - "description": "Create an agent with the given configuration.", + "summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.", + "description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.", "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/CreateAgentRequest" + "$ref": "#/components/schemas/OpenaiChatCompletionRequest" } } }, "required": true - } + }, + "deprecated": false } }, - "/v1/agents": { + "/v1/chat/completions/{completion_id}": { "get": { "responses": { "200": { - "description": "A PaginatedResponse.", + "description": "A OpenAICompletionWithInputMessages.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/PaginatedResponse" + "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" } } } @@ -283,39 +189,193 @@ } }, "tags": [ - "Agents" + "Inference" ], - "summary": "List all agents.", - "description": "List all agents.", + "summary": "Describe a chat completion by its ID.", + "description": "Describe a chat completion by its ID.", "parameters": [ { - "name": "start_index", + "name": "completion_id", + "in": "path", + "description": "ID of the chat completion.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/completions": { + "post": { + "responses": { + "200": { + "description": "An OpenAICompletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAICompletion" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inference" + ], + "summary": "Generate an OpenAI-compatible completion for the given prompt using the specified model.", + "description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiCompletionRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/embeddings": { + "post": { + "responses": { + "200": { + "description": "An OpenAIEmbeddingsResponse containing the embeddings.", + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIEmbeddingsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inference" + ], + "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.", + "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiEmbeddingsRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/files": { + "get": { + "responses": { + "200": { + "description": "An ListOpenAIFileResponse containing the list of files.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListOpenAIFileResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Files" + ], + "summary": "Returns a list of files that belong to the user's organization.", + "description": "Returns a list of files that belong to the user's organization.", + "parameters": [ + { + "name": "after", "in": "query", - "description": "The index to start the pagination from.", + "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.", "required": false, "schema": { - "type": "integer" + "type": "string" } }, { "name": "limit", "in": "query", - "description": "The number of agents to return.", + "description": "A limit on the number of objects to be returned. Limit can range between 1 and 10,000, and the default is 10,000.", "required": false, "schema": { "type": "integer" } + }, + { + "name": "order", + "in": "query", + "description": "Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc` for descending order.", + "required": false, + "schema": { + "$ref": "#/components/schemas/Order" + } + }, + { + "name": "purpose", + "in": "query", + "description": "Only return files with the given purpose.", + "required": false, + "schema": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + } } - ] + ], + "deprecated": false }, "post": { "responses": { "200": { - "description": "An AgentCreateResponse with the agent ID.", + "description": "An OpenAIFileObject representing the uploaded file.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/AgentCreateResponse" + "$ref": "#/components/schemas/OpenAIFileObject" } } } @@ -334,32 +394,330 @@ } }, "tags": [ - "Agents" + "Files" ], - "summary": "Create an agent with the given configuration.", - "description": "Create an agent with the given configuration.", + "summary": "Upload a file that can be used across various endpoints.", + "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.", + "parameters": [], + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "type": "object", + "properties": { + "file": { + "type": "string", + "format": "binary" + }, + "purpose": { + "$ref": "#/components/schemas/OpenAIFilePurpose" + }, + "expires_after": { + "$ref": "#/components/schemas/ExpiresAfter" + } + }, + "required": [ + "file", + "purpose" + ] + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/files/{file_id}": { + "get": { + "responses": { + "200": { + "description": "An OpenAIFileObject containing file information.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Files" + ], + "summary": "Returns information about a specific file.", + "description": "Returns information about a specific file.", + "parameters": [ + { + "name": "file_id", + "in": "path", + "description": "The ID of the file to use for this request.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "An OpenAIFileDeleteResponse indicating successful deletion.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenAIFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Files" + ], + "summary": "Delete a file.", + "description": "Delete a file.", + "parameters": [ + { + "name": "file_id", + "in": "path", + "description": "The ID of the file to use for this request.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/files/{file_id}/content": { + "get": { + "responses": { + "200": { + 
"description": "The raw file content as a binary response.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Response" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Files" + ], + "summary": "Returns the contents of the specified file.", + "description": "Returns the contents of the specified file.", + "parameters": [ + { + "name": "file_id", + "in": "path", + "description": "The ID of the file to use for this request.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/health": { + "get": { + "responses": { + "200": { + "description": "Health information indicating if the service is operational.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthInfo" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inspect" + ], + "summary": "Get the current health status of the service.", + "description": "Get the current health status of the service.", + "parameters": [], + "deprecated": false + } + }, + "/v1/inspect/routes": { + "get": { + "responses": { + "200": { + "description": "Response containing information about all available routes.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListRoutesResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Inspect" + ], + "summary": "List all available API routes with their methods and implementing providers.", + "description": "List all available API routes with their methods and implementing providers.", + "parameters": [], + "deprecated": false + } + }, + "/v1/models": { + "get": { + "responses": { + "200": { + "description": "A ListModelsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Models" + ], + "summary": "List all models.", + "description": "List all models.", + "parameters": [], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "A Model.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Model" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Models" + ], + "summary": "Register a model.", + 
"description": "Register a model.", "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/CreateAgentRequest" + "$ref": "#/components/schemas/RegisterModelRequest" } } }, "required": true - } + }, + "deprecated": false } }, - "/v1alpha/agents/{agent_id}/session": { - "post": { + "/v1/models/{model_id}": { + "get": { "responses": { "200": { - "description": "An AgentSessionCreateResponse.", + "description": "A Model.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/AgentSessionCreateResponse" + "$ref": "#/components/schemas/Model" } } } @@ -378,42 +736,69 @@ } }, "tags": [ - "Agents" + "Models" ], - "summary": "Create a new session for an agent.", - "description": "Create a new session for an agent.", + "summary": "Get a model by its identifier.", + "description": "Get a model by its identifier.", "parameters": [ { - "name": "agent_id", + "name": "model_id", "in": "path", - "description": "The ID of the agent to create the session for.", + "description": "The identifier of the model to get.", "required": true, "schema": { "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateAgentSessionRequest" - } - } + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "OK" }, - "required": true - } + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Models" + ], + "summary": "Unregister a model.", + "description": "Unregister a model.", + "parameters": [ + { + "name": "model_id", + "in": "path", + "description": "The identifier of the model to unregister.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false } }, - "/v1/agents/{agent_id}/session": { + "/v1/moderations": { "post": { "responses": { "200": { - "description": "An AgentSessionCreateResponse.", + "description": "A moderation object.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/AgentSessionCreateResponse" + "$ref": "#/components/schemas/ModerationObject" } } } @@ -432,47 +817,33 @@ } }, "tags": [ - "Agents" - ], - "summary": "Create a new session for an agent.", - "description": "Create a new session for an agent.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to create the session for.", - "required": true, - "schema": { - "type": "string" - } - } + "Safety" ], + "summary": "Classifies if text and/or image inputs are potentially harmful.", + "description": "Classifies if text and/or image inputs are potentially harmful.", + "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/CreateAgentSessionRequest" + "$ref": "#/components/schemas/RunModerationRequest" } } }, "required": true - } + }, + "deprecated": false } }, - "/v1alpha/agents/{agent_id}/session/{session_id}/turn": { - "post": { + "/v1/prompts": { + "get": { "responses": { "200": { - "description": "If stream=False, returns a Turn object. 
If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk.", + "description": "A ListPromptsResponse containing all prompts.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Turn" - } - }, - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/AgentTurnResponseStreamChunk" + "$ref": "#/components/schemas/ListPromptsResponse" } } } @@ -491,56 +862,21 @@ } }, "tags": [ - "Agents" + "Prompts" ], - "summary": "Create a new turn for an agent.", - "description": "Create a new turn for an agent.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to create the turn for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to create the turn for.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreateAgentTurnRequest" - } - } - }, - "required": true - } - } - }, - "/v1/agents/{agent_id}/session/{session_id}/turn": { + "summary": "List all prompts.", + "description": "List all prompts.", + "parameters": [], + "deprecated": false + }, "post": { "responses": { "200": { - "description": "If stream=False, returns a Turn object. If stream=True, returns an SSE event stream of AgentTurnResponseStreamChunk.", + "description": "The created Prompt resource.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/Turn" - } - }, - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/AgentTurnResponseStreamChunk" + "$ref": "#/components/schemas/Prompt" } } } @@ -559,24 +895,112 @@ } }, "tags": [ - "Agents" + "Prompts" ], - "summary": "Create a new turn for an agent.", - "description": "Create a new turn for an agent.", + "summary": "Create a new prompt.", + "description": "Create a new prompt.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreatePromptRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/prompts/{prompt_id}": { + "get": { + "responses": { + "200": { + "description": "A Prompt resource.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Prompts" + ], + "summary": "Get a prompt by its identifier and optional version.", + "description": "Get a prompt by its identifier and optional version.", "parameters": [ { - "name": "agent_id", + "name": "prompt_id", "in": "path", - "description": "The ID of the agent to create the turn for.", + "description": "The identifier of the prompt to get.", "required": true, "schema": { "type": "string" } }, { - "name": "session_id", + "name": "version", + "in": "query", + "description": "The version of the prompt to get (defaults to latest).", + "required": false, + "schema": { + "type": "integer" + } + } + ], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "The updated Prompt resource with incremented version.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + 
} + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Prompts" + ], + "summary": "Update an existing prompt (increments version).", + "description": "Update an existing prompt (increments version).", + "parameters": [ + { + "name": "prompt_id", "in": "path", - "description": "The ID of the session to create the turn for.", + "description": "The identifier of the prompt to update.", "required": true, "schema": { "type": "string" @@ -587,12 +1011,229 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/CreateAgentTurnRequest" + "$ref": "#/components/schemas/UpdatePromptRequest" } } }, "required": true - } + }, + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Prompts" + ], + "summary": "Delete a prompt.", + "description": "Delete a prompt.", + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "description": "The identifier of the prompt to delete.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/prompts/{prompt_id}/set-default-version": { + "post": { + "responses": { + "200": { + "description": "The prompt with the specified version now set as default.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Prompt" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Prompts" + ], + "summary": "Set which version of a prompt should be the default in get_prompt (latest).", + "description": "Set which version of a prompt should be the default in get_prompt (latest).", + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "description": "The identifier of the prompt.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SetDefaultVersionRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/prompts/{prompt_id}/versions": { + "get": { + "responses": { + "200": { + "description": "A ListPromptsResponse containing all versions of the prompt.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListPromptsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Prompts" + ], + "summary": "List all versions of a specific prompt.", + "description": "List all versions of a specific prompt.", + "parameters": [ + { + "name": "prompt_id", + "in": "path", + "description": "The identifier of the 
prompt to list versions for.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/providers": { + "get": { + "responses": { + "200": { + "description": "A ListProvidersResponse containing information about all providers.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListProvidersResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Providers" + ], + "summary": "List all available providers.", + "description": "List all available providers.", + "parameters": [], + "deprecated": false + } + }, + "/v1/providers/{provider_id}": { + "get": { + "responses": { + "200": { + "description": "A ProviderInfo object containing the provider's details.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ProviderInfo" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Providers" + ], + "summary": "Get detailed information about a specific provider.", + "description": "Get detailed information about a specific provider.", + "parameters": [ + { + "name": "provider_id", + "in": "path", + "description": "The ID of the provider to inspect.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false } }, "/v1/responses": { @@ -663,7 +1304,8 @@ "$ref": "#/components/schemas/Order" } } - ] + ], + "deprecated": false }, "post": { "responses": { @@ -710,459 +1352,8 @@ } }, "required": true - } - } - }, - "/v1/prompts": { - "get": { - "responses": { - "200": { - "description": "A ListPromptsResponse containing all prompts.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListPromptsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } }, - "tags": [ - "Prompts" - ], - "summary": "List all prompts.", - "description": "List all prompts.", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "The created Prompt resource.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Prompt" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Prompts" - ], - "summary": "Create a new prompt.", - "description": "Create a new prompt.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/CreatePromptRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/agents/{agent_id}": { - "get": { - "responses": { - "200": { - "description": "An Agent of the agent.", - "content": { - 
"application/json": { - "schema": { - "$ref": "#/components/schemas/Agent" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Describe an agent by its ID.", - "description": "Describe an agent by its ID.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "ID of the agent.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Delete an agent by its ID and its associated sessions and turns.", - "description": "Delete an agent by its ID and its associated sessions and turns.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to delete.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/agents/{agent_id}": { - "get": { - "responses": { - "200": { - "description": "An Agent of the agent.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Agent" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Describe an agent by its ID.", - "description": "Describe an agent by its ID.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "ID of the agent.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Delete an agent by its ID and its associated sessions and turns.", - "description": "Delete an agent by its ID and its associated sessions and turns.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to delete.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/agents/{agent_id}/session/{session_id}": { - "get": { - "responses": { - "200": { - "description": "A Session.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Session" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Retrieve an agent session by its ID.", - "description": "Retrieve an agent session by 
its ID.", - "parameters": [ - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to get.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to get the session for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_ids", - "in": "query", - "description": "(Optional) List of turn IDs to filter the session by.", - "required": false, - "schema": { - "type": "array", - "items": { - "type": "string" - } - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Delete an agent session by its ID and its associated turns.", - "description": "Delete an agent session by its ID and its associated turns.", - "parameters": [ - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to delete.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to delete the session for.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/agents/{agent_id}/session/{session_id}": { - "get": { - "responses": { - "200": { - "description": "A Session.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Session" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Retrieve an agent session by its ID.", - "description": "Retrieve an agent session by its ID.", - "parameters": [ - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to get.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to get the session for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_ids", - "in": "query", - "description": "(Optional) List of turn IDs to filter the session by.", - "required": false, - "schema": { - "type": "array", - "items": { - "type": "string" - } - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Delete an agent session by its ID and its associated turns.", - "description": "Delete an agent session by its ID and its associated turns.", - "parameters": [ - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to delete.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to delete the session for.", - "required": true, - "schema": { - "type": 
"string" - } - } - ] + "deprecated": false } }, "/v1/responses/{response_id}": { @@ -1206,7 +1397,8 @@ "type": "string" } } - ] + ], + "deprecated": false }, "delete": { "responses": { @@ -1248,2728 +1440,8 @@ "type": "string" } } - ] - } - }, - "/v1/prompts/{prompt_id}": { - "get": { - "responses": { - "200": { - "description": "A Prompt resource.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Prompt" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Prompts" ], - "summary": "Get a prompt by its identifier and optional version.", - "description": "Get a prompt by its identifier and optional version.", - "parameters": [ - { - "name": "prompt_id", - "in": "path", - "description": "The identifier of the prompt to get.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "version", - "in": "query", - "description": "The version of the prompt to get (defaults to latest).", - "required": false, - "schema": { - "type": "integer" - } - } - ] - }, - "post": { - "responses": { - "200": { - "description": "The updated Prompt resource with incremented version.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Prompt" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Prompts" - ], - "summary": "Update an existing prompt (increments version).", - "description": "Update an existing prompt (increments version).", - "parameters": [ - { - "name": "prompt_id", - "in": "path", - "description": "The identifier of the prompt to update.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/UpdatePromptRequest" - } - } - }, - "required": true - } - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Prompts" - ], - "summary": "Delete a prompt.", - "description": "Delete a prompt.", - "parameters": [ - { - "name": "prompt_id", - "in": "path", - "description": "The identifier of the prompt to delete.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/eval/benchmarks/{benchmark_id}/evaluations": { - "post": { - "responses": { - "200": { - "description": "EvaluateResponse object containing generations and scores.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - 
"summary": "Evaluate a list of rows on a benchmark.", - "description": "Evaluate a list of rows on a benchmark.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateRowsRequest" - } - } - }, - "required": true - } - } - }, - "/v1/eval/benchmarks/{benchmark_id}/evaluations": { - "post": { - "responses": { - "200": { - "description": "EvaluateResponse object containing generations and scores.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Evaluate a list of rows on a benchmark.", - "description": "Evaluate a list of rows on a benchmark.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateRowsRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}": { - "get": { - "responses": { - "200": { - "description": "An AgentStepResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AgentStepResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Retrieve an agent step by its ID.", - "description": "Retrieve an agent step by its ID.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to get the step for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to get the step for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_id", - "in": "path", - "description": "The ID of the turn to get the step for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "step_id", - "in": "path", - "description": "The ID of the step to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}": { - "get": { - "responses": { - "200": { - "description": "An AgentStepResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/AgentStepResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": 
"Retrieve an agent step by its ID.", - "description": "Retrieve an agent step by its ID.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to get the step for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to get the step for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_id", - "in": "path", - "description": "The ID of the turn to get the step for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "step_id", - "in": "path", - "description": "The ID of the step to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}": { - "get": { - "responses": { - "200": { - "description": "A Turn.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Turn" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Retrieve an agent turn by its ID.", - "description": "Retrieve an agent turn by its ID.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to get the turn for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to get the turn for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_id", - "in": "path", - "description": "The ID of the turn to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}": { - "get": { - "responses": { - "200": { - "description": "A Turn.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Turn" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Retrieve an agent turn by its ID.", - "description": "Retrieve an agent turn by its ID.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to get the turn for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to get the turn for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_id", - "in": "path", - "description": "The ID of the turn to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/eval/benchmarks/{benchmark_id}": { - "get": { - "responses": { - "200": { - "description": "A Benchmark.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Benchmark" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - 
"default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "Get a benchmark by its ID.", - "description": "Get a benchmark by its ID.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "Unregister a benchmark.", - "description": "Unregister a benchmark.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/eval/benchmarks/{benchmark_id}": { - "get": { - "responses": { - "200": { - "description": "A Benchmark.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Benchmark" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "Get a benchmark by its ID.", - "description": "Get a benchmark by its ID.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "Unregister a benchmark.", - "description": "Unregister a benchmark.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/chat/completions/{completion_id}": { - "get": { - "responses": { - "200": { - "description": "A OpenAICompletionWithInputMessages.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAICompletionWithInputMessages" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inference" - ], - "summary": "Describe a chat completion by its ID.", - "description": "Describe a chat completion by its ID.", - "parameters": [ - { - "name": "completion_id", - "in": "path", - "description": "ID of the chat completion.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/datasets/{dataset_id}": { - "get": { - "responses": { - "200": { - "description": "A Dataset.", - "content": { - "application/json": { - 
"schema": { - "$ref": "#/components/schemas/Dataset" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Datasets" - ], - "summary": "Get a dataset by its ID.", - "description": "Get a dataset by its ID.", - "parameters": [ - { - "name": "dataset_id", - "in": "path", - "description": "The ID of the dataset to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Datasets" - ], - "summary": "Unregister a dataset by its ID.", - "description": "Unregister a dataset by its ID.", - "parameters": [ - { - "name": "dataset_id", - "in": "path", - "description": "The ID of the dataset to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/models/{model_id}": { - "get": { - "responses": { - "200": { - "description": "A Model.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Model" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Models" - ], - "summary": "Get a model by its identifier.", - "description": "Get a model by its identifier.", - "parameters": [ - { - "name": "model_id", - "in": "path", - "description": "The identifier of the model to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Models" - ], - "summary": "Unregister a model.", - "description": "Unregister a model.", - "parameters": [ - { - "name": "model_id", - "in": "path", - "description": "The identifier of the model to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/scoring-functions/{scoring_fn_id}": { - "get": { - "responses": { - "200": { - "description": "A ScoringFn.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ScoringFn" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ScoringFunctions" - ], - "summary": "Get a scoring function by its ID.", - "description": "Get a scoring function by its ID.", - "parameters": [ - { - "name": "scoring_fn_id", - "in": "path", - "description": "The ID of the scoring function to get.", - 
"required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ScoringFunctions" - ], - "summary": "Unregister a scoring function.", - "description": "Unregister a scoring function.", - "parameters": [ - { - "name": "scoring_fn_id", - "in": "path", - "description": "The ID of the scoring function to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/shields/{identifier}": { - "get": { - "responses": { - "200": { - "description": "A Shield.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Shield" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Shields" - ], - "summary": "Get a shield by its identifier.", - "description": "Get a shield by its identifier.", - "parameters": [ - { - "name": "identifier", - "in": "path", - "description": "The identifier of the shield to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Shields" - ], - "summary": "Unregister a shield.", - "description": "Unregister a shield.", - "parameters": [ - { - "name": "identifier", - "in": "path", - "description": "The identifier of the shield to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/telemetry/traces/{trace_id}/spans/{span_id}": { - "get": { - "responses": { - "200": { - "description": "A Span.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Span" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Get a span by its ID.", - "description": "Get a span by its ID.", - "parameters": [ - { - "name": "trace_id", - "in": "path", - "description": "The ID of the trace to get the span from.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "span_id", - "in": "path", - "description": "The ID of the span to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/telemetry/spans/{span_id}/tree": { - "post": { - "responses": { - "200": { - "description": "A QuerySpanTreeResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QuerySpanTreeResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": 
"#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Get a span tree by its ID.", - "description": "Get a span tree by its ID.", - "parameters": [ - { - "name": "span_id", - "in": "path", - "description": "The ID of the span to get the tree from.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/GetSpanTreeRequest" - } - } - }, - "required": true - } - } - }, - "/v1/tools/{tool_name}": { - "get": { - "responses": { - "200": { - "description": "A Tool.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Tool" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolGroups" - ], - "summary": "Get a tool by its name.", - "description": "Get a tool by its name.", - "parameters": [ - { - "name": "tool_name", - "in": "path", - "description": "The name of the tool to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/toolgroups/{toolgroup_id}": { - "get": { - "responses": { - "200": { - "description": "A ToolGroup.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ToolGroup" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolGroups" - ], - "summary": "Get a tool group by its ID.", - "description": "Get a tool group by its ID.", - "parameters": [ - { - "name": "toolgroup_id", - "in": "path", - "description": "The ID of the tool group to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolGroups" - ], - "summary": "Unregister a tool group.", - "description": "Unregister a tool group.", - "parameters": [ - { - "name": "toolgroup_id", - "in": "path", - "description": "The ID of the tool group to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/telemetry/traces/{trace_id}": { - "get": { - "responses": { - "200": { - "description": "A Trace.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Trace" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Get a trace by its ID.", - "description": "Get a trace by its ID.", - 
"parameters": [ - { - "name": "trace_id", - "in": "path", - "description": "The ID of the trace to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/post-training/job/artifacts": { - "get": { - "responses": { - "200": { - "description": "A PostTrainingJobArtifactsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJobArtifactsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Get the artifacts of a training job.", - "description": "Get the artifacts of a training job.", - "parameters": [ - { - "name": "job_uuid", - "in": "query", - "description": "The UUID of the job to get the artifacts of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/post-training/job/artifacts": { - "get": { - "responses": { - "200": { - "description": "A PostTrainingJobArtifactsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJobArtifactsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Get the artifacts of a training job.", - "description": "Get the artifacts of a training job.", - "parameters": [ - { - "name": "job_uuid", - "in": "query", - "description": "The UUID of the job to get the artifacts of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/post-training/job/status": { - "get": { - "responses": { - "200": { - "description": "A PostTrainingJobStatusResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJobStatusResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Get the status of a training job.", - "description": "Get the status of a training job.", - "parameters": [ - { - "name": "job_uuid", - "in": "query", - "description": "The UUID of the job to get the status of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/post-training/job/status": { - "get": { - "responses": { - "200": { - "description": "A PostTrainingJobStatusResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJobStatusResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Get the status of a training job.", - 
"description": "Get the status of a training job.", - "parameters": [ - { - "name": "job_uuid", - "in": "query", - "description": "The UUID of the job to get the status of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/post-training/jobs": { - "get": { - "responses": { - "200": { - "description": "A ListPostTrainingJobsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListPostTrainingJobsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Get all training jobs.", - "description": "Get all training jobs.", - "parameters": [] - } - }, - "/v1/post-training/jobs": { - "get": { - "responses": { - "200": { - "description": "A ListPostTrainingJobsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListPostTrainingJobsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Get all training jobs.", - "description": "Get all training jobs.", - "parameters": [] - } - }, - "/v1/vector-dbs/{vector_db_id}": { - "get": { - "responses": { - "200": { - "description": "A VectorDB.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorDB" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorDBs" - ], - "summary": "Get a vector database by its identifier.", - "description": "Get a vector database by its identifier.", - "parameters": [ - { - "name": "vector_db_id", - "in": "path", - "description": "The identifier of the vector database to get.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorDBs" - ], - "summary": "Unregister a vector database.", - "description": "Unregister a vector database.", - "parameters": [ - { - "name": "vector_db_id", - "in": "path", - "description": "The identifier of the vector database to unregister.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/health": { - "get": { - "responses": { - "200": { - "description": "Health information indicating if the service is operational.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/HealthInfo" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": 
"#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inspect" - ], - "summary": "Get the current health status of the service.", - "description": "Get the current health status of the service.", - "parameters": [] - } - }, - "/v1/tool-runtime/rag-tool/insert": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolRuntime" - ], - "summary": "Index documents so they can be used by the RAG system.", - "description": "Index documents so they can be used by the RAG system.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/InsertRequest" - } - } - }, - "required": true - } - } - }, - "/v1/vector-io/insert": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Insert chunks into a vector database.", - "description": "Insert chunks into a vector database.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/InsertChunksRequest" - } - } - }, - "required": true - } - } - }, - "/v1/providers/{provider_id}": { - "get": { - "responses": { - "200": { - "description": "A ProviderInfo object containing the provider's details.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ProviderInfo" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Providers" - ], - "summary": "Get detailed information about a specific provider.", - "description": "Get detailed information about a specific provider.", - "parameters": [ - { - "name": "provider_id", - "in": "path", - "description": "The ID of the provider to inspect.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/tool-runtime/invoke": { - "post": { - "responses": { - "200": { - "description": "A ToolInvocationResult.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ToolInvocationResult" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolRuntime" - ], - "summary": "Run a tool with the given arguments.", - "description": "Run a tool with the given arguments.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": 
"#/components/schemas/InvokeToolRequest" - } - } - }, - "required": true - } - } - }, - "/v1/datasetio/iterrows/{dataset_id}": { - "get": { - "responses": { - "200": { - "description": "A PaginatedResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PaginatedResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "DatasetIO" - ], - "summary": "Get a paginated list of rows from a dataset.", - "description": "Get a paginated list of rows from a dataset.\nUses offset-based pagination where:\n- start_index: The starting index (0-based). If None, starts from beginning.\n- limit: Number of items to return. If None or -1, returns all items.\n\nThe response includes:\n- data: List of items for the current page.\n- has_more: Whether there are more items available after this set.", - "parameters": [ - { - "name": "dataset_id", - "in": "path", - "description": "The ID of the dataset to get the rows from.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "start_index", - "in": "query", - "description": "Index into dataset for the first row to get. Get all rows if None.", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "limit", - "in": "query", - "description": "The number of rows to get.", - "required": false, - "schema": { - "type": "integer" - } - } - ] - } - }, - "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}": { - "get": { - "responses": { - "200": { - "description": "The status of the evaluation job.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Job" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Get the status of a job.", - "description": "Get the status of a job.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "description": "The ID of the job to get the status of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Cancel a job.", - "description": "Cancel a job.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "description": "The ID of the job to cancel.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}": { - "get": { - "responses": { - "200": { 
- "description": "The status of the evaluation job.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Job" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Get the status of a job.", - "description": "Get the status of a job.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "description": "The ID of the job to get the status of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Cancel a job.", - "description": "Cancel a job.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "description": "The ID of the job to cancel.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result": { - "get": { - "responses": { - "200": { - "description": "The result of the job.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Get the result of a job.", - "description": "Get the result of a job.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "description": "The ID of the job to get the result of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result": { - "get": { - "responses": { - "200": { - "description": "The result of the job.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/EvaluateResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Get the result of a job.", - "description": "Get the result of a job.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - 
"required": true, - "schema": { - "type": "string" - } - }, - { - "name": "job_id", - "in": "path", - "description": "The ID of the job to get the result of.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1alpha/agents/{agent_id}/sessions": { - "get": { - "responses": { - "200": { - "description": "A PaginatedResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PaginatedResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "List all session(s) of a given agent.", - "description": "List all session(s) of a given agent.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to list sessions for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "start_index", - "in": "query", - "description": "The index to start the pagination from.", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "limit", - "in": "query", - "description": "The number of sessions to return.", - "required": false, - "schema": { - "type": "integer" - } - } - ] - } - }, - "/v1/agents/{agent_id}/sessions": { - "get": { - "responses": { - "200": { - "description": "A PaginatedResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PaginatedResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "List all session(s) of a given agent.", - "description": "List all session(s) of a given agent.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to list sessions for.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "start_index", - "in": "query", - "description": "The index to start the pagination from.", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "limit", - "in": "query", - "description": "The number of sessions to return.", - "required": false, - "schema": { - "type": "integer" - } - } - ] - } - }, - "/v1alpha/eval/benchmarks": { - "get": { - "responses": { - "200": { - "description": "A ListBenchmarksResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListBenchmarksResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "List all benchmarks.", - "description": "List all benchmarks.", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": 
{ - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "Register a benchmark.", - "description": "Register a benchmark.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterBenchmarkRequest" - } - } - }, - "required": true - } - } - }, - "/v1/eval/benchmarks": { - "get": { - "responses": { - "200": { - "description": "A ListBenchmarksResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListBenchmarksResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "List all benchmarks.", - "description": "List all benchmarks.", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Benchmarks" - ], - "summary": "Register a benchmark.", - "description": "Register a benchmark.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterBenchmarkRequest" - } - } - }, - "required": true - } - } - }, - "/v1/chat/completions": { - "get": { - "responses": { - "200": { - "description": "A ListOpenAIChatCompletionResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListOpenAIChatCompletionResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inference" - ], - "summary": "List all chat completions.", - "description": "List all chat completions.", - "parameters": [ - { - "name": "after", - "in": "query", - "description": "The ID of the last chat completion to return.", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "limit", - "in": "query", - "description": "The maximum number of chat completions to return.", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "model", - "in": "query", - "description": "The model to filter by.", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "order", - "in": "query", - "description": "The order to sort the chat completions by: \"asc\" or \"desc\". 
Defaults to \"desc\".", - "required": false, - "schema": { - "$ref": "#/components/schemas/Order" - } - } - ] - }, - "post": { - "responses": { - "200": { - "description": "An OpenAIChatCompletion.", - "content": { - "application/json": { - "schema": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIChatCompletion" - }, - { - "$ref": "#/components/schemas/OpenAIChatCompletionChunk" - } - ] - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inference" - ], - "summary": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.", - "description": "Generate an OpenAI-compatible chat completion for the given messages using the specified model.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenaiChatCompletionRequest" - } - } - }, - "required": true - } - } - }, - "/v1/datasets": { - "get": { - "responses": { - "200": { - "description": "A ListDatasetsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListDatasetsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Datasets" - ], - "summary": "List all datasets.", - "description": "List all datasets.", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "A Dataset.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Dataset" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Datasets" - ], - "summary": "Register a new dataset.", - "description": "Register a new dataset.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterDatasetRequest" - } - } - }, - "required": true - } - } - }, - "/v1/models": { - "get": { - "responses": { - "200": { - "description": "A ListModelsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListModelsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Models" - ], - "summary": "List all models.", - "description": "List all models.", - "parameters": [] - }, - "post": { - "responses": { - "200": { - "description": "A Model.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Model" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": 
"#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Models" - ], - "summary": "Register a model.", - "description": "Register a model.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RegisterModelRequest" - } - } - }, - "required": true - } + "deprecated": false } }, "/v1/responses/{response_id}/input_items": { @@ -4061,18 +1533,19 @@ "$ref": "#/components/schemas/Order" } } - ] + ], + "deprecated": false } }, - "/v1/prompts/{prompt_id}/versions": { - "get": { + "/v1/safety/run-shield": { + "post": { "responses": { "200": { - "description": "A ListPromptsResponse containing all versions of the prompt.", + "description": "A RunShieldResponse.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ListPromptsResponse" + "$ref": "#/components/schemas/RunShieldResponse" } } } @@ -4091,32 +1564,185 @@ } }, "tags": [ - "Prompts" + "Safety" ], - "summary": "List all versions of a specific prompt.", - "description": "List all versions of a specific prompt.", + "summary": "Run a shield.", + "description": "Run a shield.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RunShieldRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/scoring-functions": { + "get": { + "responses": { + "200": { + "description": "A ListScoringFunctionsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListScoringFunctionsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ScoringFunctions" + ], + "summary": "List all scoring functions.", + "description": "List all scoring functions.", + "parameters": [], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ScoringFunctions" + ], + "summary": "Register a scoring function.", + "description": "Register a scoring function.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterScoringFunctionRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/scoring-functions/{scoring_fn_id}": { + "get": { + "responses": { + "200": { + "description": "A ScoringFn.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoringFn" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ScoringFunctions" + ], + "summary": "Get a scoring function by its ID.", + "description": "Get a scoring function by its ID.", "parameters": [ { - "name": "prompt_id", + "name": 
"scoring_fn_id", "in": "path", - "description": "The identifier of the prompt to list versions for.", + "description": "The ID of the scoring function to get.", "required": true, "schema": { "type": "string" } } - ] - } - }, - "/v1/providers": { - "get": { + ], + "deprecated": false + }, + "delete": { "responses": { "200": { - "description": "A ListProvidersResponse containing information about all providers.", + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ScoringFunctions" + ], + "summary": "Unregister a scoring function.", + "description": "Unregister a scoring function.", + "parameters": [ + { + "name": "scoring_fn_id", + "in": "path", + "description": "The ID of the scoring function to unregister.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/scoring/score": { + "post": { + "responses": { + "200": { + "description": "A ScoreResponse object containing rows and aggregated results.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ListProvidersResponse" + "$ref": "#/components/schemas/ScoreResponse" } } } @@ -4135,22 +1761,33 @@ } }, "tags": [ - "Providers" + "Scoring" ], - "summary": "List all available providers.", - "description": "List all available providers.", - "parameters": [] + "summary": "Score a list of rows.", + "description": "Score a list of rows.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoreRequest" + } + } + }, + "required": true + }, + "deprecated": false } }, - "/v1/inspect/routes": { - "get": { + "/v1/scoring/score-batch": { + "post": { "responses": { "200": { - "description": "Response containing information about all available routes.", + "description": "A ScoreBatchResponse.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ListRoutesResponse" + "$ref": "#/components/schemas/ScoreBatchResponse" } } } @@ -4169,11 +1806,309 @@ } }, "tags": [ - "Inspect" + "Scoring" ], - "summary": "List all available API routes with their methods and implementing providers.", - "description": "List all available API routes with their methods and implementing providers.", - "parameters": [] + "summary": "Score a batch of rows.", + "description": "Score a batch of rows.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ScoreBatchRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/shields": { + "get": { + "responses": { + "200": { + "description": "A ListShieldsResponse.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListShieldsResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Shields" + ], + "summary": "List all shields.", + "description": "List all shields.", + "parameters": [], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "A Shield.", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Shields" + ], + "summary": "Register a shield.", + "description": "Register a shield.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RegisterShieldRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/shields/{identifier}": { + "get": { + "responses": { + "200": { + "description": "A Shield.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Shield" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Shields" + ], + "summary": "Get a shield by its identifier.", + "description": "Get a shield by its identifier.", + "parameters": [ + { + "name": "identifier", + "in": "path", + "description": "The identifier of the shield to get.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Shields" + ], + "summary": "Unregister a shield.", + "description": "Unregister a shield.", + "parameters": [ + { + "name": "identifier", + "in": "path", + "description": "The identifier of the shield to unregister.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/synthetic-data-generation/generate": { + "post": { + "responses": { + "200": { + "description": "Response containing filtered synthetic data samples and optional statistics", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyntheticDataGenerationResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "SyntheticDataGeneration (Coming Soon)" + ], + "summary": "Generate synthetic data based on input dialogs and apply filtering.", + "description": "Generate synthetic data based on input dialogs and apply filtering.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyntheticDataGenerateRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/telemetry/events": { + "post": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" 
+ }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "Telemetry" + ], + "summary": "Log an event.", + "description": "Log an event.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/LogEventRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/tool-runtime/invoke": { + "post": { + "responses": { + "200": { + "description": "A ToolInvocationResult.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolInvocationResult" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolRuntime" + ], + "summary": "Run a tool with the given arguments.", + "description": "Run a tool with the given arguments.", + "parameters": [], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/InvokeToolRequest" + } + } + }, + "required": true + }, + "deprecated": false } }, "/v1/tool-runtime/list-tools": { @@ -4226,42 +2161,11 @@ "$ref": "#/components/schemas/URL" } } - ] + ], + "deprecated": false } }, - "/v1/scoring-functions": { - "get": { - "responses": { - "200": { - "description": "A ListScoringFunctionsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListScoringFunctionsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ScoringFunctions" - ], - "summary": "List all scoring functions.", - "description": "List all scoring functions.", - "parameters": [] - }, + "/v1/tool-runtime/rag-tool/insert": { "post": { "responses": { "200": { @@ -4281,64 +2185,33 @@ } }, "tags": [ - "ScoringFunctions" + "ToolRuntime" ], - "summary": "Register a scoring function.", - "description": "Register a scoring function.", + "summary": "Index documents so they can be used by the RAG system.", + "description": "Index documents so they can be used by the RAG system.", "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RegisterScoringFunctionRequest" + "$ref": "#/components/schemas/InsertRequest" } } }, "required": true - } + }, + "deprecated": false } }, - "/v1/shields": { - "get": { - "responses": { - "200": { - "description": "A ListShieldsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListShieldsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Shields" - ], - "summary": "List all shields.", - "description": "List all shields.", - "parameters": [] - }, + "/v1/tool-runtime/rag-tool/query": { "post": { "responses": { "200": { - "description": "A Shield.", + "description": "RAGQueryResult containing the retrieved content and metadata", "content": { 
"application/json": { "schema": { - "$ref": "#/components/schemas/Shield" + "$ref": "#/components/schemas/RAGQueryResult" } } } @@ -4357,21 +2230,22 @@ } }, "tags": [ - "Shields" + "ToolRuntime" ], - "summary": "Register a shield.", - "description": "Register a shield.", + "summary": "Query the RAG system for context; typically invoked by the agent.", + "description": "Query the RAG system for context; typically invoked by the agent.", "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/RegisterShieldRequest" + "$ref": "#/components/schemas/QueryRequest" } } }, "required": true - } + }, + "deprecated": false } }, "/v1/toolgroups": { @@ -4405,7 +2279,8 @@ ], "summary": "List tool groups with optional provider.", "description": "List tool groups with optional provider.", - "parameters": [] + "parameters": [], + "deprecated": false }, "post": { "responses": { @@ -4440,7 +2315,89 @@ } }, "required": true - } + }, + "deprecated": false + } + }, + "/v1/toolgroups/{toolgroup_id}": { + "get": { + "responses": { + "200": { + "description": "A ToolGroup.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ToolGroup" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolGroups" + ], + "summary": "Get a tool group by its ID.", + "description": "Get a tool group by its ID.", + "parameters": [ + { + "name": "toolgroup_id", + "in": "path", + "description": "The ID of the tool group to get.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolGroups" + ], + "summary": "Unregister a tool group.", + "description": "Unregister a tool group.", + "parameters": [ + { + "name": "toolgroup_id", + "in": "path", + "description": "The ID of the tool group to unregister.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false } }, "/v1/tools": { @@ -4484,7 +2441,53 @@ "type": "string" } } - ] + ], + "deprecated": false + } + }, + "/v1/tools/{tool_name}": { + "get": { + "responses": { + "200": { + "description": "A Tool.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/Tool" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "ToolGroups" + ], + "summary": "Get a tool by its name.", + "description": "Get a tool by its name.", + "parameters": [ + { + "name": "tool_name", + "in": "path", + "description": "The name of the tool to get.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false } }, "/v1/vector-dbs": { @@ -4518,7 +2521,8 @@ ], "summary": "List all vector databases.", "description": "List all 
vector databases.", - "parameters": [] + "parameters": [], + "deprecated": false }, "post": { "responses": { @@ -4560,10 +2564,92 @@ } }, "required": true - } + }, + "deprecated": false } }, - "/v1/telemetry/events": { + "/v1/vector-dbs/{vector_db_id}": { + "get": { + "responses": { + "200": { + "description": "A VectorDB.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorDB" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorDBs" + ], + "summary": "Get a vector database by its identifier.", + "description": "Get a vector database by its identifier.", + "parameters": [ + { + "name": "vector_db_id", + "in": "path", + "description": "The identifier of the vector database to get.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorDBs" + ], + "summary": "Unregister a vector database.", + "description": "Unregister a vector database.", + "parameters": [ + { + "name": "vector_db_id", + "in": "path", + "description": "The identifier of the vector database to unregister.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + } + }, + "/v1/vector-io/insert": { "post": { "responses": { "200": { @@ -4583,32 +2669,33 @@ } }, "tags": [ - "Telemetry" + "VectorIO" ], - "summary": "Log an event.", - "description": "Log an event.", + "summary": "Insert chunks into a vector database.", + "description": "Insert chunks into a vector database.", "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/LogEventRequest" + "$ref": "#/components/schemas/InsertChunksRequest" } } }, "required": true - } + }, + "deprecated": false } }, - "/v1/vector_stores/{vector_store_id}/files": { - "get": { + "/v1/vector-io/query": { + "post": { "responses": { "200": { - "description": "A VectorStoreListFilesResponse containing the list of files.", + "description": "A QueryChunksResponse.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VectorStoreListFilesResponse" + "$ref": "#/components/schemas/QueryChunksResponse" } } } @@ -4629,213 +2716,20 @@ "tags": [ "VectorIO" ], - "summary": "List files in a vector store.", - "description": "List files in a vector store.", - "parameters": [ - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store to list files from.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "limit", - "in": "query", - "description": "(Optional) A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "order", - "in": "query", - "description": "(Optional) Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc` for descending order.", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "after", - "in": "query", - "description": "(Optional) A cursor for use in pagination. `after` is an object ID that defines your place in the list.", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "before", - "in": "query", - "description": "(Optional) A cursor for use in pagination. `before` is an object ID that defines your place in the list.", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "filter", - "in": "query", - "description": "(Optional) Filter by file status to only return files with the specified status.", - "required": false, - "schema": { - "$ref": "#/components/schemas/VectorStoreFileStatus" - } - } - ] - }, - "post": { - "responses": { - "200": { - "description": "A VectorStoreFileObject representing the attached file.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreFileObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Attach a file to a vector store.", - "description": "Attach a file to a vector store.", - "parameters": [ - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store to attach the file to.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenaiAttachFileToVectorStoreRequest" - } - } - }, - "required": true - } - } - }, - "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { - "post": { - "responses": { - "200": { - "description": "A VectorStoreFileBatchObject representing the cancelled file batch.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreFileBatchObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Cancels a vector store file batch.", - "description": "Cancels a vector store file batch.", - "parameters": [ - { - "name": "batch_id", - "in": "path", - "description": "The ID of the file batch to cancel.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store containing the file batch.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/completions": { - "post": { - "responses": { - "200": { - "description": "An OpenAICompletion.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAICompletion" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inference" - ], - "summary": 
"Generate an OpenAI-compatible completion for the given prompt using the specified model.", - "description": "Generate an OpenAI-compatible completion for the given prompt using the specified model.", + "summary": "Query chunks from a vector database.", + "description": "Query chunks from a vector database.", "parameters": [], "requestBody": { "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/OpenaiCompletionRequest" + "$ref": "#/components/schemas/QueryChunksRequest" } } }, "required": true - } + }, + "deprecated": false } }, "/v1/vector_stores": { @@ -4906,7 +2800,8 @@ "type": "string" } } - ] + ], + "deprecated": false }, "post": { "responses": { @@ -4948,7 +2843,149 @@ } }, "required": true - } + }, + "deprecated": false + } + }, + "/v1/vector_stores/{vector_store_id}": { + "get": { + "responses": { + "200": { + "description": "A VectorStoreObject representing the vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Retrieves a vector store.", + "description": "Retrieves a vector store.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "description": "The ID of the vector store to retrieve.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "A VectorStoreObject representing the updated vector store.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Updates a vector store.", + "description": "Updates a vector store.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "description": "The ID of the vector store to update.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiUpdateVectorStoreRequest" + } + } + }, + "required": true + }, + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "A VectorStoreDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Delete a vector store.", + "description": "Delete a vector store.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "description": "The ID of the vector store to delete.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false } }, 
"/v1/vector_stores/{vector_store_id}/file_batches": { @@ -5002,104 +3039,19 @@ } }, "required": true - } + }, + "deprecated": false } }, - "/v1/files/{file_id}": { + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}": { "get": { "responses": { "200": { - "description": "An OpenAIFileObject containing file information.", + "description": "A VectorStoreFileBatchObject representing the file batch.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/OpenAIFileObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Files" - ], - "summary": "Returns information about a specific file.", - "description": "Returns information about a specific file.", - "parameters": [ - { - "name": "file_id", - "in": "path", - "description": "The ID of the file to use for this request.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "An OpenAIFileDeleteResponse indicating successful deletion.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAIFileDeleteResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Files" - ], - "summary": "Delete a file.", - "description": "Delete a file.", - "parameters": [ - { - "name": "file_id", - "in": "path", - "description": "The ID of the file to use for this request.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/vector_stores/{vector_store_id}": { - "get": { - "responses": { - "200": { - "description": "A VectorStoreObject representing the vector store.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreObject" + "$ref": "#/components/schemas/VectorStoreFileBatchObject" } } } @@ -5120,28 +3072,40 @@ "tags": [ "VectorIO" ], - "summary": "Retrieves a vector store.", - "description": "Retrieves a vector store.", + "summary": "Retrieve a vector store file batch.", + "description": "Retrieve a vector store file batch.", "parameters": [ + { + "name": "batch_id", + "in": "path", + "description": "The ID of the file batch to retrieve.", + "required": true, + "schema": { + "type": "string" + } + }, { "name": "vector_store_id", "in": "path", - "description": "The ID of the vector store to retrieve.", + "description": "The ID of the vector store containing the file batch.", "required": true, "schema": { "type": "string" } } - ] - }, + ], + "deprecated": false + } + }, + "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { "post": { "responses": { "200": { - "description": "A VectorStoreObject representing the updated vector store.", + "description": "A VectorStoreFileBatchObject representing the cancelled file batch.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VectorStoreObject" + "$ref": "#/components/schemas/VectorStoreFileBatchObject" } } } @@ -5162,409 +3126,29 @@ "tags": [ "VectorIO" ], - "summary": "Updates a vector store.", - "description": "Updates a vector 
store.", + "summary": "Cancels a vector store file batch.", + "description": "Cancels a vector store file batch.", "parameters": [ + { + "name": "batch_id", + "in": "path", + "description": "The ID of the file batch to cancel.", + "required": true, + "schema": { + "type": "string" + } + }, { "name": "vector_store_id", "in": "path", - "description": "The ID of the vector store to update.", + "description": "The ID of the vector store containing the file batch.", "required": true, "schema": { "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenaiUpdateVectorStoreRequest" - } - } - }, - "required": true - } - }, - "delete": { - "responses": { - "200": { - "description": "A VectorStoreDeleteResponse indicating the deletion status.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreDeleteResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Delete a vector store.", - "description": "Delete a vector store.", - "parameters": [ - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store to delete.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/vector_stores/{vector_store_id}/files/{file_id}": { - "get": { - "responses": { - "200": { - "description": "A VectorStoreFileObject representing the file.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreFileObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Retrieves a vector store file.", - "description": "Retrieves a vector store file.", - "parameters": [ - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store containing the file to retrieve.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "file_id", - "in": "path", - "description": "The ID of the file to retrieve.", - "required": true, - "schema": { - "type": "string" - } - } - ] - }, - "post": { - "responses": { - "200": { - "description": "A VectorStoreFileObject representing the updated file.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreFileObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Updates a vector store file.", - "description": "Updates a vector store file.", - "parameters": [ - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store containing the file to update.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "file_id", - "in": "path", - "description": "The ID of the file to update.", 
- "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenaiUpdateVectorStoreFileRequest" - } - } - }, - "required": true - } - }, - "delete": { - "responses": { - "200": { - "description": "A VectorStoreFileDeleteResponse indicating the deletion status.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/VectorStoreFileDeleteResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Delete a vector store file.", - "description": "Delete a vector store file.", - "parameters": [ - { - "name": "vector_store_id", - "in": "path", - "description": "The ID of the vector store containing the file to delete.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "file_id", - "in": "path", - "description": "The ID of the file to delete.", - "required": true, - "schema": { - "type": "string" - } - } - ] - } - }, - "/v1/embeddings": { - "post": { - "responses": { - "200": { - "description": "An OpenAIEmbeddingsResponse containing the embeddings.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAIEmbeddingsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inference" - ], - "summary": "Generate OpenAI-compatible embeddings for the given input using the specified model.", - "description": "Generate OpenAI-compatible embeddings for the given input using the specified model.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenaiEmbeddingsRequest" - } - } - }, - "required": true - } - } - }, - "/v1/files": { - "get": { - "responses": { - "200": { - "description": "An ListOpenAIFileResponse containing the list of files.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ListOpenAIFileResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Files" - ], - "summary": "Returns a list of files that belong to the user's organization.", - "description": "Returns a list of files that belong to the user's organization.", - "parameters": [ - { - "name": "after", - "in": "query", - "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.", - "required": false, - "schema": { - "type": "string" - } - }, - { - "name": "limit", - "in": "query", - "description": "A limit on the number of objects to be returned. 
Limit can range between 1 and 10,000, and the default is 10,000.", - "required": false, - "schema": { - "type": "integer" - } - }, - { - "name": "order", - "in": "query", - "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.", - "required": false, - "schema": { - "$ref": "#/components/schemas/Order" - } - }, - { - "name": "purpose", - "in": "query", - "description": "Only return files with the given purpose.", - "required": false, - "schema": { - "$ref": "#/components/schemas/OpenAIFilePurpose" - } - } - ] - }, - "post": { - "responses": { - "200": { - "description": "An OpenAIFileObject representing the uploaded file.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/OpenAIFileObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Files" - ], - "summary": "Upload a file that can be used across various endpoints.", - "description": "Upload a file that can be used across various endpoints.\nThe file upload should be a multipart form request with:\n- file: The File object (not file name) to be uploaded.\n- purpose: The intended purpose of the uploaded file.\n- expires_after: Optional form values describing expiration for the file.", - "parameters": [], - "requestBody": { - "content": { - "multipart/form-data": { - "schema": { - "type": "object", - "properties": { - "file": { - "type": "string", - "format": "binary" - }, - "purpose": { - "$ref": "#/components/schemas/OpenAIFilePurpose" - }, - "expires_after": { - "$ref": "#/components/schemas/ExpiresAfter" - } - }, - "required": [ - "file", - "purpose" - ] - } - } - }, - "required": true - } + "deprecated": false } }, "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files": { @@ -5662,62 +3246,19 @@ "type": "string" } } - ] - } - }, - "/v1/files/{file_id}/content": { - "get": { - "responses": { - "200": { - "description": "The raw file content as a binary response.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Response" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Files" ], - "summary": "Returns the contents of the specified file.", - "description": "Returns the contents of the specified file.", - "parameters": [ - { - "name": "file_id", - "in": "path", - "description": "The ID of the file to use for this request.", - "required": true, - "schema": { - "type": "string" - } - } - ] + "deprecated": false } }, - "/v1/vector_stores/{vector_store_id}/file_batches/{batch_id}": { + "/v1/vector_stores/{vector_store_id}/files": { "get": { "responses": { "200": { - "description": "A VectorStoreFileBatchObject representing the file batch.", + "description": "A VectorStoreListFilesResponse containing the list of files.", "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/VectorStoreFileBatchObject" + "$ref": "#/components/schemas/VectorStoreListFilesResponse" } } } @@ -5738,28 +3279,286 @@ "tags": [ "VectorIO" ], - "summary": "Retrieve a 
vector store file batch.", - "description": "Retrieve a vector store file batch.", + "summary": "List files in a vector store.", + "description": "List files in a vector store.", "parameters": [ { - "name": "batch_id", + "name": "vector_store_id", "in": "path", - "description": "The ID of the file batch to retrieve.", + "description": "The ID of the vector store to list files from.", "required": true, "schema": { "type": "string" } }, + { + "name": "limit", + "in": "query", + "description": "(Optional) A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.", + "required": false, + "schema": { + "type": "integer" + } + }, + { + "name": "order", + "in": "query", + "description": "(Optional) Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "after", + "in": "query", + "description": "(Optional) A cursor for use in pagination. `after` is an object ID that defines your place in the list.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "before", + "in": "query", + "description": "(Optional) A cursor for use in pagination. `before` is an object ID that defines your place in the list.", + "required": false, + "schema": { + "type": "string" + } + }, + { + "name": "filter", + "in": "query", + "description": "(Optional) Filter by file status to only return files with the specified status.", + "required": false, + "schema": { + "$ref": "#/components/schemas/VectorStoreFileStatus" + } + } + ], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the attached file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Attach a file to a vector store.", + "description": "Attach a file to a vector store.", + "parameters": [ { "name": "vector_store_id", "in": "path", - "description": "The ID of the vector store containing the file batch.", + "description": "The ID of the vector store to attach the file to.", "required": true, "schema": { "type": "string" } } - ] + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiAttachFileToVectorStoreRequest" + } + } + }, + "required": true + }, + "deprecated": false + } + }, + "/v1/vector_stores/{vector_store_id}/files/{file_id}": { + "get": { + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Retrieves a vector store file.", + "description": "Retrieves a vector store file.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", 
+ "description": "The ID of the vector store containing the file to retrieve.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "file_id", + "in": "path", + "description": "The ID of the file to retrieve.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false + }, + "post": { + "responses": { + "200": { + "description": "A VectorStoreFileObject representing the updated file.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileObject" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Updates a vector store file.", + "description": "Updates a vector store file.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "description": "The ID of the vector store containing the file to update.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "file_id", + "in": "path", + "description": "The ID of the file to update.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/OpenaiUpdateVectorStoreFileRequest" + } + } + }, + "required": true + }, + "deprecated": false + }, + "delete": { + "responses": { + "200": { + "description": "A VectorStoreFileDeleteResponse indicating the deletion status.", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/VectorStoreFileDeleteResponse" + } + } + } + }, + "400": { + "$ref": "#/components/responses/BadRequest400" + }, + "429": { + "$ref": "#/components/responses/TooManyRequests429" + }, + "500": { + "$ref": "#/components/responses/InternalServerError500" + }, + "default": { + "$ref": "#/components/responses/DefaultError" + } + }, + "tags": [ + "VectorIO" + ], + "summary": "Delete a vector store file.", + "description": "Delete a vector store file.", + "parameters": [ + { + "name": "vector_store_id", + "in": "path", + "description": "The ID of the vector store containing the file to delete.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "file_id", + "in": "path", + "description": "The ID of the file to delete.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "deprecated": false } }, "/v1/vector_stores/{vector_store_id}/files/{file_id}/content": { @@ -5812,7 +3611,8 @@ "type": "string" } } - ] + ], + "deprecated": false } }, "/v1/vector_stores/{vector_store_id}/search": { @@ -5866,1030 +3666,8 @@ } }, "required": true - } - } - }, - "/v1alpha/post-training/preference-optimize": { - "post": { - "responses": { - "200": { - "description": "A PostTrainingJob.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJob" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Run preference optimization of a model.", - "description": "Run preference optimization of a model.", - "parameters": 
[], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PreferenceOptimizeRequest" - } - } - }, - "required": true - } - } - }, - "/v1/post-training/preference-optimize": { - "post": { - "responses": { - "200": { - "description": "A PostTrainingJob.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJob" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Run preference optimization of a model.", - "description": "Run preference optimization of a model.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PreferenceOptimizeRequest" - } - } - }, - "required": true - } - } - }, - "/v1/tool-runtime/rag-tool/query": { - "post": { - "responses": { - "200": { - "description": "RAGQueryResult containing the retrieved content and metadata", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RAGQueryResult" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "ToolRuntime" - ], - "summary": "Query the RAG system for context; typically invoked by the agent.", - "description": "Query the RAG system for context; typically invoked by the agent.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryRequest" - } - } - }, - "required": true - } - } - }, - "/v1/vector-io/query": { - "post": { - "responses": { - "200": { - "description": "A QueryChunksResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryChunksResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "VectorIO" - ], - "summary": "Query chunks from a vector database.", - "description": "Query chunks from a vector database.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryChunksRequest" - } - } - }, - "required": true - } - } - }, - "/v1/telemetry/metrics/{metric_name}": { - "post": { - "responses": { - "200": { - "description": "A QueryMetricsResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryMetricsResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Query metrics.", - "description": "Query metrics.", - "parameters": [ - { - "name": "metric_name", - "in": "path", - 
"description": "The name of the metric to query.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryMetricsRequest" - } - } - }, - "required": true - } - } - }, - "/v1/telemetry/spans": { - "post": { - "responses": { - "200": { - "description": "A QuerySpansResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QuerySpansResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Query spans.", - "description": "Query spans.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QuerySpansRequest" - } - } - }, - "required": true - } - } - }, - "/v1/telemetry/traces": { - "post": { - "responses": { - "200": { - "description": "A QueryTracesResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryTracesResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Query traces.", - "description": "Query traces.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/QueryTracesRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/inference/rerank": { - "post": { - "responses": { - "200": { - "description": "RerankResponse with indices sorted by relevance score (descending).", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RerankResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Inference" - ], - "summary": "Rerank a list of documents based on their relevance to a query.", - "description": "Rerank a list of documents based on their relevance to a query.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RerankRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume": { - "post": { - "responses": { - "200": { - "description": "A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Turn" - } - }, - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/AgentTurnResponseStreamChunk" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - 
}, - "tags": [ - "Agents" - ], - "summary": "Resume an agent turn with executed tool call responses.", - "description": "Resume an agent turn with executed tool call responses.\nWhen a Turn has the status `awaiting_input` due to pending input from client side tool calls, this endpoint can be used to submit the outputs from the tool calls once they are ready.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to resume.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to resume.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_id", - "in": "path", - "description": "The ID of the turn to resume.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ResumeAgentTurnRequest" - } - } - }, - "required": true - } - } - }, - "/v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume": { - "post": { - "responses": { - "200": { - "description": "A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk objects.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Turn" - } - }, - "text/event-stream": { - "schema": { - "$ref": "#/components/schemas/AgentTurnResponseStreamChunk" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Agents" - ], - "summary": "Resume an agent turn with executed tool call responses.", - "description": "Resume an agent turn with executed tool call responses.\nWhen a Turn has the status `awaiting_input` due to pending input from client side tool calls, this endpoint can be used to submit the outputs from the tool calls once they are ready.", - "parameters": [ - { - "name": "agent_id", - "in": "path", - "description": "The ID of the agent to resume.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "session_id", - "in": "path", - "description": "The ID of the session to resume.", - "required": true, - "schema": { - "type": "string" - } - }, - { - "name": "turn_id", - "in": "path", - "description": "The ID of the turn to resume.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ResumeAgentTurnRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/eval/benchmarks/{benchmark_id}/jobs": { - "post": { - "responses": { - "200": { - "description": "The job that was created to run the evaluation.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Job" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Run an evaluation on a benchmark.", - "description": "Run an evaluation on a benchmark.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the 
benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunEvalRequest" - } - } - }, - "required": true - } - } - }, - "/v1/eval/benchmarks/{benchmark_id}/jobs": { - "post": { - "responses": { - "200": { - "description": "The job that was created to run the evaluation.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Job" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Eval" - ], - "summary": "Run an evaluation on a benchmark.", - "description": "Run an evaluation on a benchmark.", - "parameters": [ - { - "name": "benchmark_id", - "in": "path", - "description": "The ID of the benchmark to run the evaluation on.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunEvalRequest" - } - } - }, - "required": true - } - } - }, - "/v1/moderations": { - "post": { - "responses": { - "200": { - "description": "A moderation object.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ModerationObject" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Safety" - ], - "summary": "Classifies if text and/or image inputs are potentially harmful.", - "description": "Classifies if text and/or image inputs are potentially harmful.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunModerationRequest" - } - } - }, - "required": true - } - } - }, - "/v1/safety/run-shield": { - "post": { - "responses": { - "200": { - "description": "A RunShieldResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunShieldResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Safety" - ], - "summary": "Run a shield.", - "description": "Run a shield.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/RunShieldRequest" - } - } - }, - "required": true - } - } - }, - "/v1/telemetry/spans/export": { - "post": { - "responses": { - "200": { - "description": "OK" - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Telemetry" - ], - "summary": "Save spans to a dataset.", - "description": "Save spans to a dataset.", - "parameters": [], - "requestBody": { - "content": { - 
"application/json": { - "schema": { - "$ref": "#/components/schemas/SaveSpansToDatasetRequest" - } - } - }, - "required": true - } - } - }, - "/v1/scoring/score": { - "post": { - "responses": { - "200": { - "description": "A ScoreResponse object containing rows and aggregated results.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ScoreResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Scoring" - ], - "summary": "Score a list of rows.", - "description": "Score a list of rows.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ScoreRequest" - } - } - }, - "required": true - } - } - }, - "/v1/scoring/score-batch": { - "post": { - "responses": { - "200": { - "description": "A ScoreBatchResponse.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ScoreBatchResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Scoring" - ], - "summary": "Score a batch of rows.", - "description": "Score a batch of rows.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/ScoreBatchRequest" - } - } - }, - "required": true - } - } - }, - "/v1/prompts/{prompt_id}/set-default-version": { - "post": { - "responses": { - "200": { - "description": "The prompt with the specified version now set as default.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/Prompt" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "Prompts" - ], - "summary": "Set which version of a prompt should be the default in get_prompt (latest).", - "description": "Set which version of a prompt should be the default in get_prompt (latest).", - "parameters": [ - { - "name": "prompt_id", - "in": "path", - "description": "The identifier of the prompt.", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SetDefaultVersionRequest" - } - } - }, - "required": true - } - } - }, - "/v1alpha/post-training/supervised-fine-tune": { - "post": { - "responses": { - "200": { - "description": "A PostTrainingJob.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJob" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Run supervised fine-tuning of a model.", 
- "description": "Run supervised fine-tuning of a model.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupervisedFineTuneRequest" - } - } - }, - "required": true - } - } - }, - "/v1/post-training/supervised-fine-tune": { - "post": { - "responses": { - "200": { - "description": "A PostTrainingJob.", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/PostTrainingJob" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "PostTraining (Coming Soon)" - ], - "summary": "Run supervised fine-tuning of a model.", - "description": "Run supervised fine-tuning of a model.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SupervisedFineTuneRequest" - } - } - }, - "required": true - } - } - }, - "/v1/synthetic-data-generation/generate": { - "post": { - "responses": { - "200": { - "description": "Response containing filtered synthetic data samples and optional statistics", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SyntheticDataGenerationResponse" - } - } - } - }, - "400": { - "$ref": "#/components/responses/BadRequest400" - }, - "429": { - "$ref": "#/components/responses/TooManyRequests429" - }, - "500": { - "$ref": "#/components/responses/InternalServerError500" - }, - "default": { - "$ref": "#/components/responses/DefaultError" - } - }, - "tags": [ - "SyntheticDataGeneration (Coming Soon)" - ], - "summary": "Generate synthetic data based on input dialogs and apply filtering.", - "description": "Generate synthetic data based on input dialogs and apply filtering.", - "parameters": [], - "requestBody": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/SyntheticDataGenerateRequest" - } - } - }, - "required": true - } + "deprecated": false } }, "/v1/version": { @@ -6923,7 +3701,8 @@ ], "summary": "Get the version of the service.", "description": "Get the version of the service.", - "parameters": [] + "parameters": [], + "deprecated": false } } }, @@ -6959,10 +3738,788 @@ "title": "Error", "description": "Error response from the API. Roughly follows RFC 7807." }, - "AppendRowsRequest": { + "Order": { + "type": "string", + "enum": [ + "asc", + "desc" + ], + "title": "Order", + "description": "Sort order for paginated responses." 
+ }, + "ListOpenAIChatCompletionResponse": { "type": "object", "properties": { - "rows": { + "data": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the chat completion" + }, + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "description": "List of choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "default": "chat.completion", + "description": "The object type, which will be \"chat.completion\"" + }, + "created": { + "type": "integer", + "description": "The Unix timestamp in seconds when the chat completion was created" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the chat completion" + }, + "input_messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIMessageParam" + } + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "object", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages" + }, + "description": "List of chat completion objects with their input messages" + }, + "has_more": { + "type": "boolean", + "description": "Whether there are more completions available beyond this list" + }, + "first_id": { + "type": "string", + "description": "ID of the first completion in this list" + }, + "last_id": { + "type": "string", + "description": "ID of the last completion in this list" + }, + "object": { + "type": "string", + "const": "list", + "default": "list", + "description": "Must be \"list\" to identify this as a list response" + } + }, + "additionalProperties": false, + "required": [ + "data", + "has_more", + "first_id", + "last_id", + "object" + ], + "title": "ListOpenAIChatCompletionResponse", + "description": "Response from listing OpenAI-compatible chat completions." + }, + "OpenAIAssistantMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "assistant", + "default": "assistant", + "description": "Must be \"assistant\" to identify this as the model's response" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + ], + "description": "The content of the model's response" + }, + "name": { + "type": "string", + "description": "(Optional) The name of the assistant message participant." + }, + "tool_calls": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" + }, + "description": "List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." + } + }, + "additionalProperties": false, + "required": [ + "role" + ], + "title": "OpenAIAssistantMessageParam", + "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." 
+ }, + "OpenAIChatCompletionContentPartImageParam": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "image_url", + "default": "image_url", + "description": "Must be \"image_url\" to identify this as image content" + }, + "image_url": { + "$ref": "#/components/schemas/OpenAIImageURL", + "description": "Image URL specification and processing details" + } + }, + "additionalProperties": false, + "required": [ + "type", + "image_url" + ], + "title": "OpenAIChatCompletionContentPartImageParam", + "description": "Image content part for OpenAI-compatible chat completion messages." + }, + "OpenAIChatCompletionContentPartParam": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + }, + { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + }, + { + "$ref": "#/components/schemas/OpenAIFile" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam", + "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", + "file": "#/components/schemas/OpenAIFile" + } + } + }, + "OpenAIChatCompletionContentPartTextParam": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text", + "default": "text", + "description": "Must be \"text\" to identify this as text content" + }, + "text": { + "type": "string", + "description": "The text content of the message" + } + }, + "additionalProperties": false, + "required": [ + "type", + "text" + ], + "title": "OpenAIChatCompletionContentPartTextParam", + "description": "Text content part for OpenAI-compatible chat completion messages." + }, + "OpenAIChatCompletionToolCall": { + "type": "object", + "properties": { + "index": { + "type": "integer", + "description": "(Optional) Index of the tool call in the list" + }, + "id": { + "type": "string", + "description": "(Optional) Unique identifier for the tool call" + }, + "type": { + "type": "string", + "const": "function", + "default": "function", + "description": "Must be \"function\" to identify this as a function call" + }, + "function": { + "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction", + "description": "(Optional) Function call details" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "OpenAIChatCompletionToolCall", + "description": "Tool call specification for OpenAI-compatible chat completion responses." + }, + "OpenAIChatCompletionToolCallFunction": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "(Optional) Name of the function to call" + }, + "arguments": { + "type": "string", + "description": "(Optional) Arguments to pass to the function as a JSON string" + } + }, + "additionalProperties": false, + "title": "OpenAIChatCompletionToolCallFunction", + "description": "Function call details for OpenAI-compatible tool calls." 
+      },
+      "OpenAIChoice": {
+        "type": "object",
+        "properties": {
+          "message": {
+            "oneOf": [
+              {
+                "$ref": "#/components/schemas/OpenAIUserMessageParam"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAISystemMessageParam"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAIAssistantMessageParam"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAIToolMessageParam"
+              },
+              {
+                "$ref": "#/components/schemas/OpenAIDeveloperMessageParam"
+              }
+            ],
+            "discriminator": {
+              "propertyName": "role",
+              "mapping": {
+                "user": "#/components/schemas/OpenAIUserMessageParam",
+                "system": "#/components/schemas/OpenAISystemMessageParam",
+                "assistant": "#/components/schemas/OpenAIAssistantMessageParam",
+                "tool": "#/components/schemas/OpenAIToolMessageParam",
+                "developer": "#/components/schemas/OpenAIDeveloperMessageParam"
+              }
+            },
+            "description": "The message from the model"
+          },
+          "finish_reason": {
+            "type": "string",
+            "description": "The reason the model stopped generating"
+          },
+          "index": {
+            "type": "integer",
+            "description": "The index of the choice"
+          },
+          "logprobs": {
+            "$ref": "#/components/schemas/OpenAIChoiceLogprobs",
+            "description": "(Optional) The log probabilities for the tokens in the message"
+          }
+        },
+        "additionalProperties": false,
+        "required": [
+          "message",
+          "finish_reason",
+          "index"
+        ],
+        "title": "OpenAIChoice",
+        "description": "A choice from an OpenAI-compatible chat completion response."
+      },
+      "OpenAIChoiceLogprobs": {
+        "type": "object",
+        "properties": {
+          "content": {
+            "type": "array",
+            "items": {
+              "$ref": "#/components/schemas/OpenAITokenLogProb"
+            },
+            "description": "(Optional) The log probabilities for the tokens in the message"
+          },
+          "refusal": {
+            "type": "array",
+            "items": {
+              "$ref": "#/components/schemas/OpenAITokenLogProb"
+            },
+            "description": "(Optional) The log probabilities for the tokens in the refusal"
+          }
+        },
+        "additionalProperties": false,
+        "title": "OpenAIChoiceLogprobs",
+        "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response."
+      },
+      "OpenAIDeveloperMessageParam": {
+        "type": "object",
+        "properties": {
+          "role": {
+            "type": "string",
+            "const": "developer",
+            "default": "developer",
+            "description": "Must be \"developer\" to identify this as a developer message"
+          },
+          "content": {
+            "oneOf": [
+              {
+                "type": "string"
+              },
+              {
+                "type": "array",
+                "items": {
+                  "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam"
+                }
+              }
+            ],
+            "description": "The content of the developer message"
+          },
+          "name": {
+            "type": "string",
+            "description": "(Optional) The name of the developer message participant."
+          }
+        },
+        "additionalProperties": false,
+        "required": [
+          "role",
+          "content"
+        ],
+        "title": "OpenAIDeveloperMessageParam",
+        "description": "A message from the developer in an OpenAI-compatible chat completion request."
+ }, + "OpenAIFile": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "file", + "default": "file" + }, + "file": { + "$ref": "#/components/schemas/OpenAIFileFile" + } + }, + "additionalProperties": false, + "required": [ + "type", + "file" + ], + "title": "OpenAIFile" + }, + "OpenAIFileFile": { + "type": "object", + "properties": { + "file_data": { + "type": "string" + }, + "file_id": { + "type": "string" + }, + "filename": { + "type": "string" + } + }, + "additionalProperties": false, + "title": "OpenAIFileFile" + }, + "OpenAIImageURL": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "URL of the image to include in the message" + }, + "detail": { + "type": "string", + "description": "(Optional) Level of detail for image processing. Can be \"low\", \"high\", or \"auto\"" + } + }, + "additionalProperties": false, + "required": [ + "url" + ], + "title": "OpenAIImageURL", + "description": "Image URL specification for OpenAI-compatible chat completion messages." + }, + "OpenAIMessageParam": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIUserMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAISystemMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIAssistantMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIToolMessageParam" + }, + { + "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" + } + ], + "discriminator": { + "propertyName": "role", + "mapping": { + "user": "#/components/schemas/OpenAIUserMessageParam", + "system": "#/components/schemas/OpenAISystemMessageParam", + "assistant": "#/components/schemas/OpenAIAssistantMessageParam", + "tool": "#/components/schemas/OpenAIToolMessageParam", + "developer": "#/components/schemas/OpenAIDeveloperMessageParam" + } + } + }, + "OpenAISystemMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "system", + "default": "system", + "description": "Must be \"system\" to identify this as a system message" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + ], + "description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." + }, + "name": { + "type": "string", + "description": "(Optional) The name of the system message participant." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "OpenAISystemMessageParam", + "description": "A system message providing instructions or context to the model." + }, + "OpenAITokenLogProb": { + "type": "object", + "properties": { + "token": { + "type": "string" + }, + "bytes": { + "type": "array", + "items": { + "type": "integer" + } + }, + "logprob": { + "type": "number" + }, + "top_logprobs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAITopLogProb" + } + } + }, + "additionalProperties": false, + "required": [ + "token", + "logprob", + "top_logprobs" + ], + "title": "OpenAITokenLogProb", + "description": "The log probability for a token from an OpenAI-compatible chat completion response." 
+ }, + "OpenAIToolMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "tool", + "default": "tool", + "description": "Must be \"tool\" to identify this as a tool response" + }, + "tool_call_id": { + "type": "string", + "description": "Unique identifier for the tool call this response is for" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + } + } + ], + "description": "The response content from the tool" + } + }, + "additionalProperties": false, + "required": [ + "role", + "tool_call_id", + "content" + ], + "title": "OpenAIToolMessageParam", + "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." + }, + "OpenAITopLogProb": { + "type": "object", + "properties": { + "token": { + "type": "string" + }, + "bytes": { + "type": "array", + "items": { + "type": "integer" + } + }, + "logprob": { + "type": "number" + } + }, + "additionalProperties": false, + "required": [ + "token", + "logprob" + ], + "title": "OpenAITopLogProb", + "description": "The top log probability for a token from an OpenAI-compatible chat completion response." + }, + "OpenAIUserMessageParam": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "user", + "default": "user", + "description": "Must be \"user\" to identify this as a user message" + }, + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" + } + } + ], + "description": "The content of the message, which can include text and other media" + }, + "name": { + "type": "string", + "description": "(Optional) The name of the user message participant." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "OpenAIUserMessageParam", + "description": "A message from the user in an OpenAI-compatible chat completion request." + }, + "OpenAIJSONSchema": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the schema" + }, + "description": { + "type": "string", + "description": "(Optional) Description of the schema" + }, + "strict": { + "type": "boolean", + "description": "(Optional) Whether to enforce strict adherence to the schema" + }, + "schema": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) The JSON schema definition" + } + }, + "additionalProperties": false, + "required": [ + "name" + ], + "title": "OpenAIJSONSchema", + "description": "JSON schema specification for OpenAI-compatible structured response format." + }, + "OpenAIResponseFormatJSONObject": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "json_object", + "default": "json_object", + "description": "Must be \"json_object\" to indicate generic JSON object response format" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "OpenAIResponseFormatJSONObject", + "description": "JSON object response format for OpenAI-compatible chat completion requests." 
+ }, + "OpenAIResponseFormatJSONSchema": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "json_schema", + "default": "json_schema", + "description": "Must be \"json_schema\" to indicate structured JSON response format" + }, + "json_schema": { + "$ref": "#/components/schemas/OpenAIJSONSchema", + "description": "The JSON schema specification for the response" + } + }, + "additionalProperties": false, + "required": [ + "type", + "json_schema" + ], + "title": "OpenAIResponseFormatJSONSchema", + "description": "JSON schema response format for OpenAI-compatible chat completion requests." + }, + "OpenAIResponseFormatParam": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseFormatText" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONSchema" + }, + { + "$ref": "#/components/schemas/OpenAIResponseFormatJSONObject" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "text": "#/components/schemas/OpenAIResponseFormatText", + "json_schema": "#/components/schemas/OpenAIResponseFormatJSONSchema", + "json_object": "#/components/schemas/OpenAIResponseFormatJSONObject" + } + } + }, + "OpenAIResponseFormatText": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text", + "default": "text", + "description": "Must be \"text\" to indicate plain text response format" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "OpenAIResponseFormatText", + "description": "Text response format for OpenAI-compatible chat completion requests." + }, + "OpenaiChatCompletionRequest": { + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint." + }, + "messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIMessageParam" + }, + "description": "List of messages in the conversation." + }, + "frequency_penalty": { + "type": "number", + "description": "(Optional) The penalty for repeated tokens." + }, + "function_call": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + ], + "description": "(Optional) The function call to use." + }, + "functions": { "type": "array", "items": { "type": "object", @@ -6989,130 +4546,101 @@ ] } }, - "description": "The rows to append to the dataset." - } - }, - "additionalProperties": false, - "required": [ - "rows" - ], - "title": "AppendRowsRequest" - }, - "CancelTrainingJobRequest": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string", - "description": "The UUID of the job to cancel." - } - }, - "additionalProperties": false, - "required": [ - "job_uuid" - ], - "title": "CancelTrainingJobRequest" - }, - "AgentConfig": { - "type": "object", - "properties": { - "sampling_params": { - "$ref": "#/components/schemas/SamplingParams" + "description": "(Optional) List of functions to use." }, - "input_shields": { - "type": "array", - "items": { - "type": "string" - } + "logit_bias": { + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "(Optional) The logit bias to use." 
}, - "output_shields": { - "type": "array", - "items": { - "type": "string" - } - }, - "toolgroups": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AgentTool" - } - }, - "client_tools": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolDef" - } - }, - "tool_choice": { - "type": "string", - "enum": [ - "auto", - "required", - "none" - ], - "title": "ToolChoice", - "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model.", - "deprecated": true - }, - "tool_prompt_format": { - "type": "string", - "enum": [ - "json", - "function_tag", - "python_list" - ], - "title": "ToolPromptFormat", - "description": "Prompt format for calling custom / zero shot tools.", - "deprecated": true - }, - "tool_config": { - "$ref": "#/components/schemas/ToolConfig" - }, - "max_infer_iters": { - "type": "integer", - "default": 10 - }, - "model": { - "type": "string", - "description": "The model identifier to use for the agent" - }, - "instructions": { - "type": "string", - "description": "The system instructions for the agent" - }, - "name": { - "type": "string", - "description": "Optional name for the agent, used in telemetry and identification" - }, - "enable_session_persistence": { + "logprobs": { "type": "boolean", - "default": false, - "description": "Optional flag indicating whether session data has to be persisted" + "description": "(Optional) The log probabilities to use." + }, + "max_completion_tokens": { + "type": "integer", + "description": "(Optional) The maximum number of tokens to generate." + }, + "max_tokens": { + "type": "integer", + "description": "(Optional) The maximum number of tokens to generate." + }, + "n": { + "type": "integer", + "description": "(Optional) The number of completions to generate." + }, + "parallel_tool_calls": { + "type": "boolean", + "description": "(Optional) Whether to parallelize tool calls." + }, + "presence_penalty": { + "type": "number", + "description": "(Optional) The penalty for repeated tokens." }, "response_format": { - "$ref": "#/components/schemas/ResponseFormat", - "description": "Optional response format configuration" - } - }, - "additionalProperties": false, - "required": [ - "model", - "instructions" - ], - "title": "AgentConfig", - "description": "Configuration for an agent." - }, - "AgentTool": { - "oneOf": [ - { - "type": "string" + "$ref": "#/components/schemas/OpenAIResponseFormatParam", + "description": "(Optional) The response format to use." }, - { - "type": "object", - "properties": { - "name": { + "seed": { + "type": "integer", + "description": "(Optional) The seed to use." + }, + "stop": { + "oneOf": [ + { "type": "string" }, - "args": { + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "description": "(Optional) The stop tokens to use." + }, + "stream": { + "type": "boolean", + "description": "(Optional) Whether to stream the response." + }, + "stream_options": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) The stream options to use." + }, + "temperature": { + "type": "number", + "description": "(Optional) The temperature to use." 
+ }, + "tool_choice": { + "oneOf": [ + { + "type": "string" + }, + { "type": "object", "additionalProperties": { "oneOf": [ @@ -7137,1504 +4665,832 @@ ] } } - }, - "additionalProperties": false, - "required": [ - "name", - "args" ], - "title": "AgentToolGroupWithArgs" - } - ] - }, - "GrammarResponseFormat": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "json_schema", - "grammar" - ], - "description": "Must be \"grammar\" to identify this format type", - "const": "grammar", - "default": "grammar" + "description": "(Optional) The tool choice to use." }, - "bnf": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The BNF grammar specification the response should conform to" - } - }, - "additionalProperties": false, - "required": [ - "type", - "bnf" - ], - "title": "GrammarResponseFormat", - "description": "Configuration for grammar-guided response generation." - }, - "GreedySamplingStrategy": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "greedy", - "default": "greedy", - "description": "Must be \"greedy\" to identify this sampling strategy" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "GreedySamplingStrategy", - "description": "Greedy sampling strategy that selects the highest probability token at each step." - }, - "JsonSchemaResponseFormat": { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "json_schema", - "grammar" - ], - "description": "Must be \"json_schema\" to identify this format type", - "const": "json_schema", - "default": "json_schema" - }, - "json_schema": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model." - } - }, - "additionalProperties": false, - "required": [ - "type", - "json_schema" - ], - "title": "JsonSchemaResponseFormat", - "description": "Configuration for JSON schema-guided response generation." - }, - "ResponseFormat": { - "oneOf": [ - { - "$ref": "#/components/schemas/JsonSchemaResponseFormat" - }, - { - "$ref": "#/components/schemas/GrammarResponseFormat" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "json_schema": "#/components/schemas/JsonSchemaResponseFormat", - "grammar": "#/components/schemas/GrammarResponseFormat" - } - } - }, - "SamplingParams": { - "type": "object", - "properties": { - "strategy": { - "oneOf": [ - { - "$ref": "#/components/schemas/GreedySamplingStrategy" - }, - { - "$ref": "#/components/schemas/TopPSamplingStrategy" - }, - { - "$ref": "#/components/schemas/TopKSamplingStrategy" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "greedy": "#/components/schemas/GreedySamplingStrategy", - "top_p": "#/components/schemas/TopPSamplingStrategy", - "top_k": "#/components/schemas/TopKSamplingStrategy" - } - }, - "description": "The sampling strategy." - }, - "max_tokens": { - "type": "integer", - "default": 0, - "description": "The maximum number of tokens that can be generated in the completion. 
The token count of your prompt plus max_tokens cannot exceed the model's context length." - }, - "repetition_penalty": { - "type": "number", - "default": 1.0, - "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics." - }, - "stop": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence." - } - }, - "additionalProperties": false, - "required": [ - "strategy" - ], - "title": "SamplingParams", - "description": "Sampling parameters." - }, - "ToolConfig": { - "type": "object", - "properties": { - "tool_choice": { - "oneOf": [ - { - "type": "string", - "enum": [ - "auto", - "required", - "none" - ], - "title": "ToolChoice", - "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model." - }, - { - "type": "string" - } - ], - "default": "auto", - "description": "(Optional) Whether tool use is automatic, required, or none. Can also specify a tool name to use a specific tool. Defaults to ToolChoice.auto." - }, - "tool_prompt_format": { - "type": "string", - "enum": [ - "json", - "function_tag", - "python_list" - ], - "description": "(Optional) Instructs the model how to format tool calls. By default, Llama Stack will attempt to use a format that is best adapted to the model. - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python syntax -- a list of function calls." - }, - "system_message_behavior": { - "type": "string", - "enum": [ - "append", - "replace" - ], - "description": "(Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: Replaces the default system prompt with the provided system message. The system message can include the string '{{function_definitions}}' to indicate where the function definitions should be inserted.", - "default": "append" - } - }, - "additionalProperties": false, - "title": "ToolConfig", - "description": "Configuration for tool use." - }, - "ToolDef": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Name of the tool" - }, - "description": { - "type": "string", - "description": "(Optional) Human-readable description of what the tool does" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolParameter" - }, - "description": "(Optional) List of parameters this tool accepts" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Additional metadata about the tool" - } - }, - "additionalProperties": false, - "required": [ - "name" - ], - "title": "ToolDef", - "description": "Tool definition used in runtime contexts." 
- }, - "ToolParameter": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "Name of the parameter" - }, - "parameter_type": { - "type": "string", - "description": "Type of the parameter (e.g., string, integer)" - }, - "description": { - "type": "string", - "description": "Human-readable description of what the parameter does" - }, - "required": { - "type": "boolean", - "default": true, - "description": "Whether this parameter is required for tool invocation" - }, - "items": { - "type": "object", - "description": "Type of the elements when parameter_type is array" - }, - "title": { - "type": "string", - "description": "(Optional) Title of the parameter" - }, - "default": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ], - "description": "(Optional) Default value for the parameter if not provided" - } - }, - "additionalProperties": false, - "required": [ - "name", - "parameter_type", - "description", - "required" - ], - "title": "ToolParameter", - "description": "Parameter definition for a tool." - }, - "TopKSamplingStrategy": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "top_k", - "default": "top_k", - "description": "Must be \"top_k\" to identify this sampling strategy" - }, - "top_k": { - "type": "integer", - "description": "Number of top tokens to consider for sampling. Must be at least 1" - } - }, - "additionalProperties": false, - "required": [ - "type", - "top_k" - ], - "title": "TopKSamplingStrategy", - "description": "Top-k sampling strategy that restricts sampling to the k most likely tokens." - }, - "TopPSamplingStrategy": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "top_p", - "default": "top_p", - "description": "Must be \"top_p\" to identify this sampling strategy" - }, - "temperature": { - "type": "number", - "description": "Controls randomness in sampling. Higher values increase randomness" - }, - "top_p": { - "type": "number", - "default": 0.95, - "description": "Cumulative probability threshold for nucleus sampling. Defaults to 0.95" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "TopPSamplingStrategy", - "description": "Top-p (nucleus) sampling strategy that samples from the smallest set of tokens with cumulative probability >= p." - }, - "CreateAgentRequest": { - "type": "object", - "properties": { - "agent_config": { - "$ref": "#/components/schemas/AgentConfig", - "description": "The configuration for the agent." - } - }, - "additionalProperties": false, - "required": [ - "agent_config" - ], - "title": "CreateAgentRequest" - }, - "AgentCreateResponse": { - "type": "object", - "properties": { - "agent_id": { - "type": "string", - "description": "Unique identifier for the created agent" - } - }, - "additionalProperties": false, - "required": [ - "agent_id" - ], - "title": "AgentCreateResponse", - "description": "Response returned when creating a new agent." - }, - "CreateAgentSessionRequest": { - "type": "object", - "properties": { - "session_name": { - "type": "string", - "description": "The name of the session to create." 
- } - }, - "additionalProperties": false, - "required": [ - "session_name" - ], - "title": "CreateAgentSessionRequest" - }, - "AgentSessionCreateResponse": { - "type": "object", - "properties": { - "session_id": { - "type": "string", - "description": "Unique identifier for the created session" - } - }, - "additionalProperties": false, - "required": [ - "session_id" - ], - "title": "AgentSessionCreateResponse", - "description": "Response returned when creating a new agent session." - }, - "ImageContentItem": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "image", - "default": "image", - "description": "Discriminator type of the content item. Always \"image\"" - }, - "image": { - "type": "object", - "properties": { - "url": { - "$ref": "#/components/schemas/URL", - "description": "A URL of the image or data URL in the format of data:image/{type};base64,{data}. Note that URL could have length limits." - }, - "data": { - "type": "string", - "contentEncoding": "base64", - "description": "base64 encoded image data as string" - } - }, - "additionalProperties": false, - "description": "Image as a base64 encoded string or an URL" - } - }, - "additionalProperties": false, - "required": [ - "type", - "image" - ], - "title": "ImageContentItem", - "description": "A image content item" - }, - "InterleavedContent": { - "oneOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/InterleavedContentItem" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/InterleavedContentItem" - } - } - ] - }, - "InterleavedContentItem": { - "oneOf": [ - { - "$ref": "#/components/schemas/ImageContentItem" - }, - { - "$ref": "#/components/schemas/TextContentItem" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "image": "#/components/schemas/ImageContentItem", - "text": "#/components/schemas/TextContentItem" - } - } - }, - "TextContentItem": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "text", - "default": "text", - "description": "Discriminator type of the content item. Always \"text\"" - }, - "text": { - "type": "string", - "description": "Text content" - } - }, - "additionalProperties": false, - "required": [ - "type", - "text" - ], - "title": "TextContentItem", - "description": "A text content item" - }, - "ToolResponseMessage": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "tool", - "default": "tool", - "description": "Must be \"tool\" to identify this as a tool response" - }, - "call_id": { - "type": "string", - "description": "Unique identifier for the tool call this response is for" - }, - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The response content from the tool" - } - }, - "additionalProperties": false, - "required": [ - "role", - "call_id", - "content" - ], - "title": "ToolResponseMessage", - "description": "A message representing the result of a tool invocation." - }, - "URL": { - "type": "object", - "properties": { - "uri": { - "type": "string", - "description": "The URL string pointing to the resource" - } - }, - "additionalProperties": false, - "required": [ - "uri" - ], - "title": "URL", - "description": "A URL reference to external content." 
- }, - "UserMessage": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "user", - "default": "user", - "description": "Must be \"user\" to identify this as a user message" - }, - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The content of the message, which can include text and other media" - }, - "context": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "(Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "UserMessage", - "description": "A message from the user in a chat conversation." - }, - "CreateAgentTurnRequest": { - "type": "object", - "properties": { - "messages": { - "type": "array", - "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/UserMessage" - }, - { - "$ref": "#/components/schemas/ToolResponseMessage" - } - ] - }, - "description": "List of messages to start the turn with." - }, - "stream": { - "type": "boolean", - "description": "(Optional) If True, generate an SSE event stream of the response. Defaults to False." - }, - "documents": { + "tools": { "type": "array", "items": { "type": "object", - "properties": { - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/InterleavedContentItem" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/InterleavedContentItem" - } - }, - { - "$ref": "#/components/schemas/URL" - } - ], - "description": "The content of the document." - }, - "mime_type": { - "type": "string", - "description": "The MIME type of the document." - } - }, - "additionalProperties": false, - "required": [ - "content", - "mime_type" - ], - "title": "Document", - "description": "A document to be used by an agent." + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } }, - "description": "(Optional) List of documents to create the turn with." + "description": "(Optional) The tools to use." }, - "toolgroups": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AgentTool" - }, - "description": "(Optional) List of toolgroups to create the turn with, will be used in addition to the agent's config toolgroups for the request." + "top_logprobs": { + "type": "integer", + "description": "(Optional) The top log probabilities to use." }, - "tool_config": { - "$ref": "#/components/schemas/ToolConfig", - "description": "(Optional) The tool configuration to create the turn with, will be used to override the agent's tool_config." + "top_p": { + "type": "number", + "description": "(Optional) The top p to use." + }, + "user": { + "type": "string", + "description": "(Optional) The user to use." 
} }, "additionalProperties": false, "required": [ + "model", "messages" ], - "title": "CreateAgentTurnRequest" + "title": "OpenaiChatCompletionRequest" }, - "CompletionMessage": { + "OpenAIChatCompletion": { "type": "object", "properties": { + "id": { + "type": "string", + "description": "The ID of the chat completion" + }, + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChoice" + }, + "description": "List of choices" + }, + "object": { + "type": "string", + "const": "chat.completion", + "default": "chat.completion", + "description": "The object type, which will be \"chat.completion\"" + }, + "created": { + "type": "integer", + "description": "The Unix timestamp in seconds when the chat completion was created" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the chat completion" + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "object", + "created", + "model" + ], + "title": "OpenAIChatCompletion", + "description": "Response from an OpenAI-compatible chat completion request." + }, + "OpenAIChatCompletionChunk": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The ID of the chat completion" + }, + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIChunkChoice" + }, + "description": "List of choices" + }, + "object": { + "type": "string", + "const": "chat.completion.chunk", + "default": "chat.completion.chunk", + "description": "The object type, which will be \"chat.completion.chunk\"" + }, + "created": { + "type": "integer", + "description": "The Unix timestamp in seconds when the chat completion was created" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the chat completion" + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "object", + "created", + "model" + ], + "title": "OpenAIChatCompletionChunk", + "description": "Chunk from a streaming response to an OpenAI-compatible chat completion request." + }, + "OpenAIChoiceDelta": { + "type": "object", + "properties": { + "content": { + "type": "string", + "description": "(Optional) The content of the delta" + }, + "refusal": { + "type": "string", + "description": "(Optional) The refusal of the delta" + }, "role": { "type": "string", - "const": "assistant", - "default": "assistant", - "description": "Must be \"assistant\" to identify this as the model's response" - }, - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The content of the model's response" - }, - "stop_reason": { - "type": "string", - "enum": [ - "end_of_turn", - "end_of_message", - "out_of_tokens" - ], - "description": "Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: The model finished generating the entire response. - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response. - `StopReason.out_of_tokens`: The model ran out of token budget." + "description": "(Optional) The role of the delta" }, "tool_calls": { "type": "array", "items": { - "$ref": "#/components/schemas/ToolCall" + "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" }, - "description": "List of tool calls. Each tool call is a ToolCall object." 
+ "description": "(Optional) The tool calls of the delta" + } + }, + "additionalProperties": false, + "title": "OpenAIChoiceDelta", + "description": "A delta from an OpenAI-compatible chat completion streaming response." + }, + "OpenAIChunkChoice": { + "type": "object", + "properties": { + "delta": { + "$ref": "#/components/schemas/OpenAIChoiceDelta", + "description": "The delta from the chunk" + }, + "finish_reason": { + "type": "string", + "description": "The reason the model stopped generating" + }, + "index": { + "type": "integer", + "description": "The index of the choice" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs", + "description": "(Optional) The log probabilities for the tokens in the message" } }, "additionalProperties": false, "required": [ - "role", - "content", - "stop_reason" + "delta", + "finish_reason", + "index" ], - "title": "CompletionMessage", - "description": "A message containing the model's (assistant) response in a chat conversation." + "title": "OpenAIChunkChoice", + "description": "A chunk choice from an OpenAI-compatible chat completion streaming response." }, - "InferenceStep": { + "OpenAICompletionWithInputMessages": { "type": "object", "properties": { - "turn_id": { + "id": { "type": "string", - "description": "The ID of the turn." + "description": "The ID of the chat completion" }, - "step_id": { - "type": "string", - "description": "The ID of the step." - }, - "started_at": { - "type": "string", - "format": "date-time", - "description": "The time the step started." - }, - "completed_at": { - "type": "string", - "format": "date-time", - "description": "The time the step completed." - }, - "step_type": { - "type": "string", - "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" - ], - "title": "StepType", - "description": "Type of the step in an agent turn.", - "const": "inference", - "default": "inference" - }, - "model_response": { - "$ref": "#/components/schemas/CompletionMessage", - "description": "The response from the LLM." - } - }, - "additionalProperties": false, - "required": [ - "turn_id", - "step_id", - "step_type", - "model_response" - ], - "title": "InferenceStep", - "description": "An inference step in an agent turn." - }, - "MemoryRetrievalStep": { - "type": "object", - "properties": { - "turn_id": { - "type": "string", - "description": "The ID of the turn." - }, - "step_id": { - "type": "string", - "description": "The ID of the step." - }, - "started_at": { - "type": "string", - "format": "date-time", - "description": "The time the step started." - }, - "completed_at": { - "type": "string", - "format": "date-time", - "description": "The time the step completed." - }, - "step_type": { - "type": "string", - "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" - ], - "title": "StepType", - "description": "Type of the step in an agent turn.", - "const": "memory_retrieval", - "default": "memory_retrieval" - }, - "vector_db_ids": { - "type": "string", - "description": "The IDs of the vector databases to retrieve context from." - }, - "inserted_context": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The context retrieved from the vector databases." - } - }, - "additionalProperties": false, - "required": [ - "turn_id", - "step_id", - "step_type", - "vector_db_ids", - "inserted_context" - ], - "title": "MemoryRetrievalStep", - "description": "A memory retrieval step in an agent turn." 
- }, - "SafetyViolation": { - "type": "object", - "properties": { - "violation_level": { - "$ref": "#/components/schemas/ViolationLevel", - "description": "Severity level of the violation" - }, - "user_message": { - "type": "string", - "description": "(Optional) Message to convey to the user about the violation" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Additional metadata including specific violation codes for debugging and telemetry" - } - }, - "additionalProperties": false, - "required": [ - "violation_level", - "metadata" - ], - "title": "SafetyViolation", - "description": "Details of a safety violation detected by content moderation." - }, - "ShieldCallStep": { - "type": "object", - "properties": { - "turn_id": { - "type": "string", - "description": "The ID of the turn." - }, - "step_id": { - "type": "string", - "description": "The ID of the step." - }, - "started_at": { - "type": "string", - "format": "date-time", - "description": "The time the step started." - }, - "completed_at": { - "type": "string", - "format": "date-time", - "description": "The time the step completed." - }, - "step_type": { - "type": "string", - "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" - ], - "title": "StepType", - "description": "Type of the step in an agent turn.", - "const": "shield_call", - "default": "shield_call" - }, - "violation": { - "$ref": "#/components/schemas/SafetyViolation", - "description": "The violation from the shield call." - } - }, - "additionalProperties": false, - "required": [ - "turn_id", - "step_id", - "step_type" - ], - "title": "ShieldCallStep", - "description": "A shield call step in an agent turn." - }, - "ToolCall": { - "type": "object", - "properties": { - "call_id": { - "type": "string" - }, - "tool_name": { - "oneOf": [ - { - "type": "string", - "enum": [ - "brave_search", - "wolfram_alpha", - "photogen", - "code_interpreter" - ], - "title": "BuiltinTool" - }, - { - "type": "string" - } - ] - }, - "arguments": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "null" - }, - { - "type": "array", - "items": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ] - } - }, - { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "number" - }, - { - "type": "boolean" - }, - { - "type": "null" - } - ] - } - } - ] - } - } - ] - }, - "arguments_json": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "call_id", - "tool_name", - "arguments" - ], - "title": "ToolCall" - }, - "ToolExecutionStep": { - "type": "object", - "properties": { - "turn_id": { - "type": "string", - "description": "The ID of the turn." - }, - "step_id": { - "type": "string", - "description": "The ID of the step." - }, - "started_at": { - "type": "string", - "format": "date-time", - "description": "The time the step started." - }, - "completed_at": { - "type": "string", - "format": "date-time", - "description": "The time the step completed." 
- }, - "step_type": { - "type": "string", - "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" - ], - "title": "StepType", - "description": "Type of the step in an agent turn.", - "const": "tool_execution", - "default": "tool_execution" - }, - "tool_calls": { + "choices": { "type": "array", "items": { - "$ref": "#/components/schemas/ToolCall" + "$ref": "#/components/schemas/OpenAIChoice" }, - "description": "The tool calls to execute." + "description": "List of choices" }, - "tool_responses": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolResponse" - }, - "description": "The tool responses from the tool calls." - } - }, - "additionalProperties": false, - "required": [ - "turn_id", - "step_id", - "step_type", - "tool_calls", - "tool_responses" - ], - "title": "ToolExecutionStep", - "description": "A tool execution step in an agent turn." - }, - "ToolResponse": { - "type": "object", - "properties": { - "call_id": { + "object": { "type": "string", - "description": "Unique identifier for the tool call this response is for" + "const": "chat.completion", + "default": "chat.completion", + "description": "The object type, which will be \"chat.completion\"" }, - "tool_name": { - "oneOf": [ - { - "type": "string", - "enum": [ - "brave_search", - "wolfram_alpha", - "photogen", - "code_interpreter" - ], - "title": "BuiltinTool" - }, - { - "type": "string" - } - ], - "description": "Name of the tool that was invoked" + "created": { + "type": "integer", + "description": "The Unix timestamp in seconds when the chat completion was created" }, - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The response content from the tool" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Additional metadata about the tool response" - } - }, - "additionalProperties": false, - "required": [ - "call_id", - "tool_name", - "content" - ], - "title": "ToolResponse", - "description": "Response from a tool invocation." - }, - "Turn": { - "type": "object", - "properties": { - "turn_id": { + "model": { "type": "string", - "description": "Unique identifier for the turn within a session" - }, - "session_id": { - "type": "string", - "description": "Unique identifier for the conversation session" + "description": "The model that was used to generate the chat completion" }, "input_messages": { "type": "array", "items": { + "$ref": "#/components/schemas/OpenAIMessageParam" + } + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "object", + "created", + "model", + "input_messages" + ], + "title": "OpenAICompletionWithInputMessages" + }, + "OpenaiCompletionRequest": { + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint." + }, + "prompt": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + }, + { + "type": "array", + "items": { + "type": "integer" + } + }, + { + "type": "array", + "items": { + "type": "array", + "items": { + "type": "integer" + } + } + } + ], + "description": "The prompt to generate a completion for." 
+ }, + "best_of": { + "type": "integer", + "description": "(Optional) The number of completions to generate." + }, + "echo": { + "type": "boolean", + "description": "(Optional) Whether to echo the prompt." + }, + "frequency_penalty": { + "type": "number", + "description": "(Optional) The penalty for repeated tokens." + }, + "logit_bias": { + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "(Optional) The logit bias to use." + }, + "logprobs": { + "type": "boolean", + "description": "(Optional) The log probabilities to use." + }, + "max_tokens": { + "type": "integer", + "description": "(Optional) The maximum number of tokens to generate." + }, + "n": { + "type": "integer", + "description": "(Optional) The number of completions to generate." + }, + "presence_penalty": { + "type": "number", + "description": "(Optional) The penalty for repeated tokens." + }, + "seed": { + "type": "integer", + "description": "(Optional) The seed to use." + }, + "stop": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "description": "(Optional) The stop tokens to use." + }, + "stream": { + "type": "boolean", + "description": "(Optional) Whether to stream the response." + }, + "stream_options": { + "type": "object", + "additionalProperties": { "oneOf": [ { - "$ref": "#/components/schemas/UserMessage" + "type": "null" }, { - "$ref": "#/components/schemas/ToolResponseMessage" + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" } ] }, - "description": "List of messages that initiated this turn" + "description": "(Optional) The stream options to use." }, - "steps": { + "temperature": { + "type": "number", + "description": "(Optional) The temperature to use." + }, + "top_p": { + "type": "number", + "description": "(Optional) The top p to use." + }, + "user": { + "type": "string", + "description": "(Optional) The user to use." + }, + "guided_choice": { "type": "array", "items": { - "oneOf": [ - { - "$ref": "#/components/schemas/InferenceStep" - }, - { - "$ref": "#/components/schemas/ToolExecutionStep" - }, - { - "$ref": "#/components/schemas/ShieldCallStep" - }, - { - "$ref": "#/components/schemas/MemoryRetrievalStep" - } - ], - "discriminator": { - "propertyName": "step_type", - "mapping": { - "inference": "#/components/schemas/InferenceStep", - "tool_execution": "#/components/schemas/ToolExecutionStep", - "shield_call": "#/components/schemas/ShieldCallStep", - "memory_retrieval": "#/components/schemas/MemoryRetrievalStep" - } - } - }, - "description": "Ordered list of processing steps executed during this turn" + "type": "string" + } }, - "output_message": { - "$ref": "#/components/schemas/CompletionMessage", - "description": "The model's generated response containing content and metadata" + "prompt_logprobs": { + "type": "integer" }, - "output_attachments": { - "type": "array", - "items": { - "type": "object", - "properties": { - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/InterleavedContentItem" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/InterleavedContentItem" - } - }, - { - "$ref": "#/components/schemas/URL" - } - ], - "description": "The content of the attachment." - }, - "mime_type": { - "type": "string", - "description": "The MIME type of the attachment." 
- } - }, - "additionalProperties": false, - "required": [ - "content", - "mime_type" - ], - "title": "Attachment", - "description": "An attachment to an agent turn." - }, - "description": "(Optional) Files or media attached to the agent's response" - }, - "started_at": { + "suffix": { "type": "string", - "format": "date-time", - "description": "Timestamp when the turn began" - }, - "completed_at": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the turn finished, if completed" + "description": "(Optional) The suffix that should be appended to the completion." } }, "additionalProperties": false, "required": [ - "turn_id", - "session_id", - "input_messages", - "steps", - "output_message", - "started_at" + "model", + "prompt" ], - "title": "Turn", - "description": "A single turn in an interaction with an Agentic System." + "title": "OpenaiCompletionRequest" }, - "ViolationLevel": { + "OpenAICompletion": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "choices": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAICompletionChoice" + } + }, + "created": { + "type": "integer" + }, + "model": { + "type": "string" + }, + "object": { + "type": "string", + "const": "text_completion", + "default": "text_completion" + } + }, + "additionalProperties": false, + "required": [ + "id", + "choices", + "created", + "model", + "object" + ], + "title": "OpenAICompletion", + "description": "Response from an OpenAI-compatible completion request." + }, + "OpenAICompletionChoice": { + "type": "object", + "properties": { + "finish_reason": { + "type": "string" + }, + "text": { + "type": "string" + }, + "index": { + "type": "integer" + }, + "logprobs": { + "$ref": "#/components/schemas/OpenAIChoiceLogprobs" + } + }, + "additionalProperties": false, + "required": [ + "finish_reason", + "text", + "index" + ], + "title": "OpenAICompletionChoice", + "description": "A choice from an OpenAI-compatible completion response." + }, + "OpenaiEmbeddingsRequest": { + "type": "object", + "properties": { + "model": { + "type": "string", + "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint." + }, + "input": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "type": "string" + } + } + ], + "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings." + }, + "encoding_format": { + "type": "string", + "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"." + }, + "dimensions": { + "type": "integer", + "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models." + }, + "user": { + "type": "string", + "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse." 
+ } + }, + "additionalProperties": false, + "required": [ + "model", + "input" + ], + "title": "OpenaiEmbeddingsRequest" + }, + "OpenAIEmbeddingData": { + "type": "object", + "properties": { + "object": { + "type": "string", + "const": "embedding", + "default": "embedding", + "description": "The object type, which will be \"embedding\"" + }, + "embedding": { + "oneOf": [ + { + "type": "array", + "items": { + "type": "number" + } + }, + { + "type": "string" + } + ], + "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")" + }, + "index": { + "type": "integer", + "description": "The index of the embedding in the input list" + } + }, + "additionalProperties": false, + "required": [ + "object", + "embedding", + "index" + ], + "title": "OpenAIEmbeddingData", + "description": "A single embedding data object from an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingUsage": { + "type": "object", + "properties": { + "prompt_tokens": { + "type": "integer", + "description": "The number of tokens in the input" + }, + "total_tokens": { + "type": "integer", + "description": "The total number of tokens used" + } + }, + "additionalProperties": false, + "required": [ + "prompt_tokens", + "total_tokens" + ], + "title": "OpenAIEmbeddingUsage", + "description": "Usage information for an OpenAI-compatible embeddings response." + }, + "OpenAIEmbeddingsResponse": { + "type": "object", + "properties": { + "object": { + "type": "string", + "const": "list", + "default": "list", + "description": "The object type, which will be \"list\"" + }, + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIEmbeddingData" + }, + "description": "List of embedding data objects" + }, + "model": { + "type": "string", + "description": "The model that was used to generate the embeddings" + }, + "usage": { + "$ref": "#/components/schemas/OpenAIEmbeddingUsage", + "description": "Usage information" + } + }, + "additionalProperties": false, + "required": [ + "object", + "data", + "model", + "usage" + ], + "title": "OpenAIEmbeddingsResponse", + "description": "Response from an OpenAI-compatible embeddings request." + }, + "OpenAIFilePurpose": { "type": "string", "enum": [ - "info", - "warn", - "error" + "assistants", + "batch" ], - "title": "ViolationLevel", - "description": "Severity level of a safety violation." + "title": "OpenAIFilePurpose", + "description": "Valid purpose values for OpenAI Files API." 
}, - "AgentTurnResponseEvent": { + "ListOpenAIFileResponse": { "type": "object", "properties": { - "payload": { - "oneOf": [ - { - "$ref": "#/components/schemas/AgentTurnResponseStepStartPayload" - }, - { - "$ref": "#/components/schemas/AgentTurnResponseStepProgressPayload" - }, - { - "$ref": "#/components/schemas/AgentTurnResponseStepCompletePayload" - }, - { - "$ref": "#/components/schemas/AgentTurnResponseTurnStartPayload" - }, - { - "$ref": "#/components/schemas/AgentTurnResponseTurnCompletePayload" - }, - { - "$ref": "#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload" - } - ], - "discriminator": { - "propertyName": "event_type", - "mapping": { - "step_start": "#/components/schemas/AgentTurnResponseStepStartPayload", - "step_progress": "#/components/schemas/AgentTurnResponseStepProgressPayload", - "step_complete": "#/components/schemas/AgentTurnResponseStepCompletePayload", - "turn_start": "#/components/schemas/AgentTurnResponseTurnStartPayload", - "turn_complete": "#/components/schemas/AgentTurnResponseTurnCompletePayload", - "turn_awaiting_input": "#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload" - } + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIFileObject" }, - "description": "Event-specific payload containing event data" + "description": "List of file objects" + }, + "has_more": { + "type": "boolean", + "description": "Whether there are more files available beyond this page" + }, + "first_id": { + "type": "string", + "description": "ID of the first file in the list for pagination" + }, + "last_id": { + "type": "string", + "description": "ID of the last file in the list for pagination" + }, + "object": { + "type": "string", + "const": "list", + "default": "list", + "description": "The object type, which is always \"list\"" } }, "additionalProperties": false, "required": [ - "payload" + "data", + "has_more", + "first_id", + "last_id", + "object" ], - "title": "AgentTurnResponseEvent", - "description": "An event in an agent turn response stream." + "title": "ListOpenAIFileResponse", + "description": "Response for listing files in OpenAI Files API." 
}, - "AgentTurnResponseStepCompletePayload": { + "OpenAIFileObject": { "type": "object", "properties": { - "event_type": { + "object": { + "type": "string", + "const": "file", + "default": "file", + "description": "The object type, which is always \"file\"" + }, + "id": { + "type": "string", + "description": "The file identifier, which can be referenced in the API endpoints" + }, + "bytes": { + "type": "integer", + "description": "The size of the file, in bytes" + }, + "created_at": { + "type": "integer", + "description": "The Unix timestamp (in seconds) for when the file was created" + }, + "expires_at": { + "type": "integer", + "description": "The Unix timestamp (in seconds) for when the file expires" + }, + "filename": { + "type": "string", + "description": "The name of the file" + }, + "purpose": { "type": "string", "enum": [ - "step_start", - "step_complete", - "step_progress", - "turn_start", - "turn_complete", - "turn_awaiting_input" + "assistants", + "batch" ], - "const": "step_complete", - "default": "step_complete", - "description": "Type of event being reported" - }, - "step_type": { - "type": "string", - "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" - ], - "description": "Type of step being executed" - }, - "step_id": { - "type": "string", - "description": "Unique identifier for the step within a turn" - }, - "step_details": { - "oneOf": [ - { - "$ref": "#/components/schemas/InferenceStep" - }, - { - "$ref": "#/components/schemas/ToolExecutionStep" - }, - { - "$ref": "#/components/schemas/ShieldCallStep" - }, - { - "$ref": "#/components/schemas/MemoryRetrievalStep" - } - ], - "discriminator": { - "propertyName": "step_type", - "mapping": { - "inference": "#/components/schemas/InferenceStep", - "tool_execution": "#/components/schemas/ToolExecutionStep", - "shield_call": "#/components/schemas/ShieldCallStep", - "memory_retrieval": "#/components/schemas/MemoryRetrievalStep" - } - }, - "description": "Complete details of the executed step" + "description": "The intended purpose of the file" } }, "additionalProperties": false, "required": [ - "event_type", - "step_type", - "step_id", - "step_details" + "object", + "id", + "bytes", + "created_at", + "expires_at", + "filename", + "purpose" ], - "title": "AgentTurnResponseStepCompletePayload", - "description": "Payload for step completion events in agent turn responses." + "title": "OpenAIFileObject", + "description": "OpenAI File object as defined in the OpenAI Files API." 
}, - "AgentTurnResponseStepProgressPayload": { + "ExpiresAfter": { "type": "object", "properties": { - "event_type": { + "anchor": { "type": "string", - "enum": [ - "step_start", - "step_complete", - "step_progress", - "turn_start", - "turn_complete", - "turn_awaiting_input" - ], - "const": "step_progress", - "default": "step_progress", - "description": "Type of event being reported" + "const": "created_at" }, - "step_type": { - "type": "string", - "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" - ], - "description": "Type of step being executed" - }, - "step_id": { - "type": "string", - "description": "Unique identifier for the step within a turn" - }, - "delta": { - "oneOf": [ - { - "$ref": "#/components/schemas/TextDelta" - }, - { - "$ref": "#/components/schemas/ImageDelta" - }, - { - "$ref": "#/components/schemas/ToolCallDelta" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "text": "#/components/schemas/TextDelta", - "image": "#/components/schemas/ImageDelta", - "tool_call": "#/components/schemas/ToolCallDelta" - } - }, - "description": "Incremental content changes during step execution" + "seconds": { + "type": "integer" } }, "additionalProperties": false, "required": [ - "event_type", - "step_type", - "step_id", - "delta" + "anchor", + "seconds" ], - "title": "AgentTurnResponseStepProgressPayload", - "description": "Payload for step progress events in agent turn responses." + "title": "ExpiresAfter", + "description": "Control expiration of uploaded files.\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)" }, - "AgentTurnResponseStepStartPayload": { + "OpenAIFileDeleteResponse": { "type": "object", "properties": { - "event_type": { + "id": { + "type": "string", + "description": "The file identifier that was deleted" + }, + "object": { + "type": "string", + "const": "file", + "default": "file", + "description": "The object type, which is always \"file\"" + }, + "deleted": { + "type": "boolean", + "description": "Whether the file was successfully deleted" + } + }, + "additionalProperties": false, + "required": [ + "id", + "object", + "deleted" + ], + "title": "OpenAIFileDeleteResponse", + "description": "Response for deleting a file in OpenAI Files API." + }, + "Response": { + "type": "object", + "title": "Response" + }, + "HealthInfo": { + "type": "object", + "properties": { + "status": { "type": "string", "enum": [ - "step_start", - "step_complete", - "step_progress", - "turn_start", - "turn_complete", - "turn_awaiting_input" + "OK", + "Error", + "Not Implemented" ], - "const": "step_start", - "default": "step_start", - "description": "Type of event being reported" + "description": "Current health status of the service" + } + }, + "additionalProperties": false, + "required": [ + "status" + ], + "title": "HealthInfo", + "description": "Health status information for the service." 
+ }, + "RouteInfo": { + "type": "object", + "properties": { + "route": { + "type": "string", + "description": "The API endpoint path" }, - "step_type": { + "method": { + "type": "string", + "description": "HTTP method for the route" + }, + "provider_types": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of provider types that implement this route" + } + }, + "additionalProperties": false, + "required": [ + "route", + "method", + "provider_types" + ], + "title": "RouteInfo", + "description": "Information about an API route including its path, method, and implementing providers." + }, + "ListRoutesResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/RouteInfo" + }, + "description": "List of available route information objects" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListRoutesResponse", + "description": "Response containing a list of all available API routes." + }, + "Model": { + "type": "object", + "properties": { + "identifier": { + "type": "string", + "description": "Unique identifier for this resource in llama stack" + }, + "provider_resource_id": { + "type": "string", + "description": "Unique identifier for this resource in the provider" + }, + "provider_id": { + "type": "string", + "description": "ID of the provider that owns this resource" + }, + "type": { "type": "string", "enum": [ - "inference", - "tool_execution", - "shield_call", - "memory_retrieval" + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group", + "prompt" ], - "description": "Type of step being executed" - }, - "step_id": { - "type": "string", - "description": "Unique identifier for the step within a turn" + "const": "model", + "default": "model", + "description": "The resource type, always 'model' for model resources" }, "metadata": { "type": "object", @@ -8660,207 +5516,484 @@ } ] }, - "description": "(Optional) Additional metadata for the step" - } - }, - "additionalProperties": false, - "required": [ - "event_type", - "step_type", - "step_id" - ], - "title": "AgentTurnResponseStepStartPayload", - "description": "Payload for step start events in agent turn responses." - }, - "AgentTurnResponseStreamChunk": { - "type": "object", - "properties": { - "event": { - "$ref": "#/components/schemas/AgentTurnResponseEvent", - "description": "Individual event in the agent turn response stream" - } - }, - "additionalProperties": false, - "required": [ - "event" - ], - "title": "AgentTurnResponseStreamChunk", - "description": "Streamed agent turn completion response." - }, - "AgentTurnResponseTurnAwaitingInputPayload": { - "type": "object", - "properties": { - "event_type": { - "type": "string", - "enum": [ - "step_start", - "step_complete", - "step_progress", - "turn_start", - "turn_complete", - "turn_awaiting_input" - ], - "const": "turn_awaiting_input", - "default": "turn_awaiting_input", - "description": "Type of event being reported" + "description": "Any additional metadata for this model" }, - "turn": { - "$ref": "#/components/schemas/Turn", - "description": "Turn data when waiting for external tool responses" - } - }, - "additionalProperties": false, - "required": [ - "event_type", - "turn" - ], - "title": "AgentTurnResponseTurnAwaitingInputPayload", - "description": "Payload for turn awaiting input events in agent turn responses." 
- }, - "AgentTurnResponseTurnCompletePayload": { - "type": "object", - "properties": { - "event_type": { - "type": "string", - "enum": [ - "step_start", - "step_complete", - "step_progress", - "turn_start", - "turn_complete", - "turn_awaiting_input" - ], - "const": "turn_complete", - "default": "turn_complete", - "description": "Type of event being reported" - }, - "turn": { - "$ref": "#/components/schemas/Turn", - "description": "Complete turn data including all steps and results" - } - }, - "additionalProperties": false, - "required": [ - "event_type", - "turn" - ], - "title": "AgentTurnResponseTurnCompletePayload", - "description": "Payload for turn completion events in agent turn responses." - }, - "AgentTurnResponseTurnStartPayload": { - "type": "object", - "properties": { - "event_type": { - "type": "string", - "enum": [ - "step_start", - "step_complete", - "step_progress", - "turn_start", - "turn_complete", - "turn_awaiting_input" - ], - "const": "turn_start", - "default": "turn_start", - "description": "Type of event being reported" - }, - "turn_id": { - "type": "string", - "description": "Unique identifier for the turn within a session" - } - }, - "additionalProperties": false, - "required": [ - "event_type", - "turn_id" - ], - "title": "AgentTurnResponseTurnStartPayload", - "description": "Payload for turn start events in agent turn responses." - }, - "ImageDelta": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "image", - "default": "image", - "description": "Discriminator type of the delta. Always \"image\"" - }, - "image": { - "type": "string", - "contentEncoding": "base64", - "description": "The incremental image data as bytes" + "model_type": { + "$ref": "#/components/schemas/ModelType", + "default": "llm", + "description": "The type of model (LLM or embedding model)" } }, "additionalProperties": false, "required": [ + "identifier", + "provider_id", "type", - "image" + "metadata", + "model_type" ], - "title": "ImageDelta", - "description": "An image content delta for streaming responses." + "title": "Model", + "description": "A model resource representing an AI model registered in Llama Stack." }, - "TextDelta": { + "ModelType": { + "type": "string", + "enum": [ + "llm", + "embedding" + ], + "title": "ModelType", + "description": "Enumeration of supported model types in Llama Stack." + }, + "ListModelsResponse": { "type": "object", "properties": { - "type": { - "type": "string", - "const": "text", - "default": "text", - "description": "Discriminator type of the delta. Always \"text\"" - }, - "text": { - "type": "string", - "description": "The incremental text content" + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Model" + } } }, "additionalProperties": false, "required": [ - "type", - "text" + "data" ], - "title": "TextDelta", - "description": "A text content delta for streaming responses." + "title": "ListModelsResponse" }, - "ToolCallDelta": { + "RegisterModelRequest": { "type": "object", "properties": { - "type": { + "model_id": { "type": "string", - "const": "tool_call", - "default": "tool_call", - "description": "Discriminator type of the delta. Always \"tool_call\"" + "description": "The identifier of the model to register." }, - "tool_call": { + "provider_model_id": { + "type": "string", + "description": "The identifier of the model in the provider." + }, + "provider_id": { + "type": "string", + "description": "The identifier of the provider." 
+ }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Any additional metadata for this model." + }, + "model_type": { + "$ref": "#/components/schemas/ModelType", + "description": "The type of model to register." + } + }, + "additionalProperties": false, + "required": [ + "model_id" + ], + "title": "RegisterModelRequest" + }, + "RunModerationRequest": { + "type": "object", + "properties": { + "input": { "oneOf": [ { "type": "string" }, { - "$ref": "#/components/schemas/ToolCall" + "type": "array", + "items": { + "type": "string" + } } ], - "description": "Either an in-progress tool call string or the final parsed tool call" + "description": "Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models." }, - "parse_status": { + "model": { "type": "string", - "enum": [ - "started", - "in_progress", - "failed", - "succeeded" - ], - "description": "Current parsing status of the tool call" + "description": "The content moderation model you would like to use." } }, "additionalProperties": false, "required": [ - "type", - "tool_call", - "parse_status" + "input", + "model" ], - "title": "ToolCallDelta", - "description": "A tool call content delta for streaming responses." + "title": "RunModerationRequest" + }, + "ModerationObject": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The unique identifier for the moderation request." + }, + "model": { + "type": "string", + "description": "The model used to generate the moderation results." + }, + "results": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ModerationObjectResults" + }, + "description": "A list of moderation objects" + } + }, + "additionalProperties": false, + "required": [ + "id", + "model", + "results" + ], + "title": "ModerationObject", + "description": "A moderation object." + }, + "ModerationObjectResults": { + "type": "object", + "properties": { + "flagged": { + "type": "boolean", + "description": "Whether any of the below categories are flagged." + }, + "categories": { + "type": "object", + "additionalProperties": { + "type": "boolean" + }, + "description": "A list of the categories, and whether they are flagged or not." + }, + "category_applied_input_types": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + }, + "description": "A list of the categories along with the input type(s) that the score applies to." + }, + "category_scores": { + "type": "object", + "additionalProperties": { + "type": "number" + }, + "description": "A list of the categories along with their scores as predicted by model." + }, + "user_message": { + "type": "string" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + } + }, + "additionalProperties": false, + "required": [ + "flagged", + "metadata" + ], + "title": "ModerationObjectResults", + "description": "A moderation object." + }, + "Prompt": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "The system prompt text with variable placeholders. 
Variables are only supported when using the Responses API." + }, + "version": { + "type": "integer", + "description": "Version (integer starting at 1, incremented on save)" + }, + "prompt_id": { + "type": "string", + "description": "Unique identifier formatted as 'pmpt_<48-digit-hash>'" + }, + "variables": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of prompt variable names that can be used in the prompt template" + }, + "is_default": { + "type": "boolean", + "default": false, + "description": "Boolean indicating whether this version is the default version for this prompt" + } + }, + "additionalProperties": false, + "required": [ + "version", + "prompt_id", + "variables", + "is_default" + ], + "title": "Prompt", + "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack." + }, + "ListPromptsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Prompt" + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListPromptsResponse", + "description": "Response model to list prompts." + }, + "CreatePromptRequest": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "The prompt text content with variable placeholders." + }, + "variables": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of variable names that can be used in the prompt template." + } + }, + "additionalProperties": false, + "required": [ + "prompt" + ], + "title": "CreatePromptRequest" + }, + "UpdatePromptRequest": { + "type": "object", + "properties": { + "prompt": { + "type": "string", + "description": "The updated prompt text content." + }, + "version": { + "type": "integer", + "description": "The current version of the prompt being updated." + }, + "variables": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Updated list of variable names that can be used in the prompt template." + }, + "set_as_default": { + "type": "boolean", + "description": "Set the new version as the default (default=True)." + } + }, + "additionalProperties": false, + "required": [ + "prompt", + "version", + "set_as_default" + ], + "title": "UpdatePromptRequest" + }, + "SetDefaultVersionRequest": { + "type": "object", + "properties": { + "version": { + "type": "integer", + "description": "The version to set as default." 
+ } + }, + "additionalProperties": false, + "required": [ + "version" + ], + "title": "SetDefaultVersionRequest" + }, + "ProviderInfo": { + "type": "object", + "properties": { + "api": { + "type": "string", + "description": "The API name this provider implements" + }, + "provider_id": { + "type": "string", + "description": "Unique identifier for the provider" + }, + "provider_type": { + "type": "string", + "description": "The type of provider implementation" + }, + "config": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Configuration parameters for the provider" + }, + "health": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Current health status of the provider" + } + }, + "additionalProperties": false, + "required": [ + "api", + "provider_id", + "provider_type", + "config", + "health" + ], + "title": "ProviderInfo", + "description": "Information about a registered provider including its configuration and health status." + }, + "ListProvidersResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ProviderInfo" + }, + "description": "List of provider information objects" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListProvidersResponse", + "description": "Response containing a list of all available providers." + }, + "ListOpenAIResponseObject": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseObjectWithInput" + }, + "description": "List of response objects with their input context" + }, + "has_more": { + "type": "boolean", + "description": "Whether there are more results available beyond this page" + }, + "first_id": { + "type": "string", + "description": "Identifier of the first item in this page" + }, + "last_id": { + "type": "string", + "description": "Identifier of the last item in this page" + }, + "object": { + "type": "string", + "const": "list", + "default": "list", + "description": "Object type identifier, always \"list\"" + } + }, + "additionalProperties": false, + "required": [ + "data", + "has_more", + "first_id", + "last_id", + "object" + ], + "title": "ListOpenAIResponseObject", + "description": "Paginated list of OpenAI response objects with navigation metadata." }, "OpenAIResponseAnnotationCitation": { "type": "object", @@ -9014,6 +6147,26 @@ } } }, + "OpenAIResponseError": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Error code identifying the type of failure" + }, + "message": { + "type": "string", + "description": "Human-readable error message describing the failure" + } + }, + "additionalProperties": false, + "required": [ + "code", + "message" + ], + "title": "OpenAIResponseError", + "description": "Error details for failed OpenAI response requests." + }, "OpenAIResponseInput": { "oneOf": [ { @@ -9148,6 +6301,637 @@ "title": "OpenAIResponseInputMessageContentText", "description": "Text content for input messages in OpenAI response format." 
}, + "OpenAIResponseMCPApprovalRequest": { + "type": "object", + "properties": { + "arguments": { + "type": "string" + }, + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "server_label": { + "type": "string" + }, + "type": { + "type": "string", + "const": "mcp_approval_request", + "default": "mcp_approval_request" + } + }, + "additionalProperties": false, + "required": [ + "arguments", + "id", + "name", + "server_label", + "type" + ], + "title": "OpenAIResponseMCPApprovalRequest", + "description": "A request for human approval of a tool invocation." + }, + "OpenAIResponseMCPApprovalResponse": { + "type": "object", + "properties": { + "approval_request_id": { + "type": "string" + }, + "approve": { + "type": "boolean" + }, + "type": { + "type": "string", + "const": "mcp_approval_response", + "default": "mcp_approval_response" + }, + "id": { + "type": "string" + }, + "reason": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "approval_request_id", + "approve", + "type" + ], + "title": "OpenAIResponseMCPApprovalResponse", + "description": "A response to an MCP approval request." + }, + "OpenAIResponseMessage": { + "type": "object", + "properties": { + "content": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInputMessageContent" + } + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent" + } + } + ] + }, + "role": { + "oneOf": [ + { + "type": "string", + "const": "system" + }, + { + "type": "string", + "const": "developer" + }, + { + "type": "string", + "const": "user" + }, + { + "type": "string", + "const": "assistant" + } + ] + }, + "type": { + "type": "string", + "const": "message", + "default": "message" + }, + "id": { + "type": "string" + }, + "status": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "content", + "role", + "type" + ], + "title": "OpenAIResponseMessage", + "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios." 
+ }, + "OpenAIResponseObjectWithInput": { + "type": "object", + "properties": { + "created_at": { + "type": "integer", + "description": "Unix timestamp when the response was created" + }, + "error": { + "$ref": "#/components/schemas/OpenAIResponseError", + "description": "(Optional) Error details if the response generation failed" + }, + "id": { + "type": "string", + "description": "Unique identifier for this response" + }, + "model": { + "type": "string", + "description": "Model identifier used for generation" + }, + "object": { + "type": "string", + "const": "response", + "default": "response", + "description": "Object type identifier, always \"response\"" + }, + "output": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseOutput" + }, + "description": "List of generated output items (messages, tool calls, etc.)" + }, + "parallel_tool_calls": { + "type": "boolean", + "default": false, + "description": "Whether tool calls can be executed in parallel" + }, + "previous_response_id": { + "type": "string", + "description": "(Optional) ID of the previous response in a conversation" + }, + "status": { + "type": "string", + "description": "Current status of the response generation" + }, + "temperature": { + "type": "number", + "description": "(Optional) Sampling temperature used for generation" + }, + "text": { + "$ref": "#/components/schemas/OpenAIResponseText", + "description": "Text formatting configuration for the response" + }, + "top_p": { + "type": "number", + "description": "(Optional) Nucleus sampling parameter used for generation" + }, + "truncation": { + "type": "string", + "description": "(Optional) Truncation strategy applied to the response" + }, + "input": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInput" + }, + "description": "List of input items that led to this response" + } + }, + "additionalProperties": false, + "required": [ + "created_at", + "id", + "model", + "object", + "output", + "parallel_tool_calls", + "status", + "text", + "input" + ], + "title": "OpenAIResponseObjectWithInput", + "description": "OpenAI response object extended with input context information." 
+ }, + "OpenAIResponseOutput": { + "oneOf": [ + { + "$ref": "#/components/schemas/OpenAIResponseMessage" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" + }, + { + "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" + }, + { + "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "message": "#/components/schemas/OpenAIResponseMessage", + "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall", + "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", + "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", + "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", + "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", + "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest" + } + } + }, + "OpenAIResponseOutputMessageContent": { + "type": "object", + "properties": { + "text": { + "type": "string" + }, + "type": { + "type": "string", + "const": "output_text", + "default": "output_text" + }, + "annotations": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseAnnotations" + } + } + }, + "additionalProperties": false, + "required": [ + "text", + "type", + "annotations" + ], + "title": "OpenAIResponseOutputMessageContentOutputText" + }, + "OpenAIResponseOutputMessageFileSearchToolCall": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for this tool call" + }, + "queries": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of search queries executed" + }, + "status": { + "type": "string", + "description": "Current status of the file search operation" + }, + "type": { + "type": "string", + "const": "file_search_call", + "default": "file_search_call", + "description": "Tool call type identifier, always \"file_search_call\"" + }, + "results": { + "type": "array", + "items": { + "type": "object", + "properties": { + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) Key-value attributes associated with the file" + }, + "file_id": { + "type": "string", + "description": "Unique identifier of the file containing the result" + }, + "filename": { + "type": "string", + "description": "Name of the file containing the result" + }, + "score": { + "type": "number", + "description": "Relevance score for this search result (between 0 and 1)" + }, + "text": { + "type": "string", + "description": "Text content of the search result" + } + }, + "additionalProperties": false, + "required": [ + "attributes", + "file_id", + "filename", + "score", + "text" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCallResults", + "description": "Search results returned by the file search operation." 
+ }, + "description": "(Optional) Search results returned by the file search operation" + } + }, + "additionalProperties": false, + "required": [ + "id", + "queries", + "status", + "type" + ], + "title": "OpenAIResponseOutputMessageFileSearchToolCall", + "description": "File search tool call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageFunctionToolCall": { + "type": "object", + "properties": { + "call_id": { + "type": "string", + "description": "Unique identifier for the function call" + }, + "name": { + "type": "string", + "description": "Name of the function being called" + }, + "arguments": { + "type": "string", + "description": "JSON string containing the function arguments" + }, + "type": { + "type": "string", + "const": "function_call", + "default": "function_call", + "description": "Tool call type identifier, always \"function_call\"" + }, + "id": { + "type": "string", + "description": "(Optional) Additional identifier for the tool call" + }, + "status": { + "type": "string", + "description": "(Optional) Current status of the function call execution" + } + }, + "additionalProperties": false, + "required": [ + "call_id", + "name", + "arguments", + "type" + ], + "title": "OpenAIResponseOutputMessageFunctionToolCall", + "description": "Function tool call output message for OpenAI responses." + }, + "OpenAIResponseOutputMessageMCPCall": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for this MCP call" + }, + "type": { + "type": "string", + "const": "mcp_call", + "default": "mcp_call", + "description": "Tool call type identifier, always \"mcp_call\"" + }, + "arguments": { + "type": "string", + "description": "JSON string containing the MCP call arguments" + }, + "name": { + "type": "string", + "description": "Name of the MCP method being called" + }, + "server_label": { + "type": "string", + "description": "Label identifying the MCP server handling the call" + }, + "error": { + "type": "string", + "description": "(Optional) Error message if the MCP call failed" + }, + "output": { + "type": "string", + "description": "(Optional) Output result from the successful MCP call" + } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "arguments", + "name", + "server_label" + ], + "title": "OpenAIResponseOutputMessageMCPCall", + "description": "Model Context Protocol (MCP) call output message for OpenAI responses." 
+ }, + "OpenAIResponseOutputMessageMCPListTools": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for this MCP list tools operation" + }, + "type": { + "type": "string", + "const": "mcp_list_tools", + "default": "mcp_list_tools", + "description": "Tool call type identifier, always \"mcp_list_tools\"" + }, + "server_label": { + "type": "string", + "description": "Label identifying the MCP server providing the tools" + }, + "tools": { + "type": "array", + "items": { + "type": "object", + "properties": { + "input_schema": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "JSON schema defining the tool's input parameters" + }, + "name": { + "type": "string", + "description": "Name of the tool" + }, + "description": { + "type": "string", + "description": "(Optional) Description of what the tool does" + } + }, + "additionalProperties": false, + "required": [ + "input_schema", + "name" + ], + "title": "MCPListToolsTool", + "description": "Tool definition returned by MCP list tools operation." + }, + "description": "List of available tools provided by the MCP server" + } + }, + "additionalProperties": false, + "required": [ + "id", + "type", + "server_label", + "tools" + ], + "title": "OpenAIResponseOutputMessageMCPListTools", + "description": "MCP list tools output message containing available tools from an MCP server." + }, + "OpenAIResponseOutputMessageWebSearchToolCall": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for this tool call" + }, + "status": { + "type": "string", + "description": "Current status of the web search operation" + }, + "type": { + "type": "string", + "const": "web_search_call", + "default": "web_search_call", + "description": "Tool call type identifier, always \"web_search_call\"" + } + }, + "additionalProperties": false, + "required": [ + "id", + "status", + "type" + ], + "title": "OpenAIResponseOutputMessageWebSearchToolCall", + "description": "Web search tool call output message for OpenAI responses." + }, + "OpenAIResponseText": { + "type": "object", + "properties": { + "format": { + "type": "object", + "properties": { + "type": { + "oneOf": [ + { + "type": "string", + "const": "text" + }, + { + "type": "string", + "const": "json_schema" + }, + { + "type": "string", + "const": "json_object" + } + ], + "description": "Must be \"text\", \"json_schema\", or \"json_object\" to identify the format type" + }, + "name": { + "type": "string", + "description": "The name of the response format. Only used for json_schema." + }, + "schema": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema." + }, + "description": { + "type": "string", + "description": "(Optional) A description of the response format. Only used for json_schema." + }, + "strict": { + "type": "boolean", + "description": "(Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema." 
+ } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "description": "(Optional) Text format configuration specifying output format requirements" + } + }, + "additionalProperties": false, + "title": "OpenAIResponseText", + "description": "Text response configuration for OpenAI responses." + }, "OpenAIResponseInputTool": { "oneOf": [ { @@ -9451,393 +7235,6 @@ "title": "OpenAIResponseInputToolWebSearch", "description": "Web search tool configuration for OpenAI response inputs." }, - "OpenAIResponseMCPApprovalRequest": { - "type": "object", - "properties": { - "arguments": { - "type": "string" - }, - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "server_label": { - "type": "string" - }, - "type": { - "type": "string", - "const": "mcp_approval_request", - "default": "mcp_approval_request" - } - }, - "additionalProperties": false, - "required": [ - "arguments", - "id", - "name", - "server_label", - "type" - ], - "title": "OpenAIResponseMCPApprovalRequest", - "description": "A request for human approval of a tool invocation." - }, - "OpenAIResponseMCPApprovalResponse": { - "type": "object", - "properties": { - "approval_request_id": { - "type": "string" - }, - "approve": { - "type": "boolean" - }, - "type": { - "type": "string", - "const": "mcp_approval_response", - "default": "mcp_approval_response" - }, - "id": { - "type": "string" - }, - "reason": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "approval_request_id", - "approve", - "type" - ], - "title": "OpenAIResponseMCPApprovalResponse", - "description": "A response to an MCP approval request." - }, - "OpenAIResponseMessage": { - "type": "object", - "properties": { - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIResponseInputMessageContent" - } - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIResponseOutputMessageContent" - } - } - ] - }, - "role": { - "oneOf": [ - { - "type": "string", - "const": "system" - }, - { - "type": "string", - "const": "developer" - }, - { - "type": "string", - "const": "user" - }, - { - "type": "string", - "const": "assistant" - } - ] - }, - "type": { - "type": "string", - "const": "message", - "default": "message" - }, - "id": { - "type": "string" - }, - "status": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "content", - "role", - "type" - ], - "title": "OpenAIResponseMessage", - "description": "Corresponds to the various Message types in the Responses API. They are all under one type because the Responses API gives them all the same \"type\" value, and there is no way to tell them apart in certain scenarios." 
- }, - "OpenAIResponseOutputMessageContent": { - "type": "object", - "properties": { - "text": { - "type": "string" - }, - "type": { - "type": "string", - "const": "output_text", - "default": "output_text" - }, - "annotations": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIResponseAnnotations" - } - } - }, - "additionalProperties": false, - "required": [ - "text", - "type", - "annotations" - ], - "title": "OpenAIResponseOutputMessageContentOutputText" - }, - "OpenAIResponseOutputMessageFileSearchToolCall": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for this tool call" - }, - "queries": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of search queries executed" - }, - "status": { - "type": "string", - "description": "Current status of the file search operation" - }, - "type": { - "type": "string", - "const": "file_search_call", - "default": "file_search_call", - "description": "Tool call type identifier, always \"file_search_call\"" - }, - "results": { - "type": "array", - "items": { - "type": "object", - "properties": { - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Key-value attributes associated with the file" - }, - "file_id": { - "type": "string", - "description": "Unique identifier of the file containing the result" - }, - "filename": { - "type": "string", - "description": "Name of the file containing the result" - }, - "score": { - "type": "number", - "description": "Relevance score for this search result (between 0 and 1)" - }, - "text": { - "type": "string", - "description": "Text content of the search result" - } - }, - "additionalProperties": false, - "required": [ - "attributes", - "file_id", - "filename", - "score", - "text" - ], - "title": "OpenAIResponseOutputMessageFileSearchToolCallResults", - "description": "Search results returned by the file search operation." - }, - "description": "(Optional) Search results returned by the file search operation" - } - }, - "additionalProperties": false, - "required": [ - "id", - "queries", - "status", - "type" - ], - "title": "OpenAIResponseOutputMessageFileSearchToolCall", - "description": "File search tool call output message for OpenAI responses." - }, - "OpenAIResponseOutputMessageFunctionToolCall": { - "type": "object", - "properties": { - "call_id": { - "type": "string", - "description": "Unique identifier for the function call" - }, - "name": { - "type": "string", - "description": "Name of the function being called" - }, - "arguments": { - "type": "string", - "description": "JSON string containing the function arguments" - }, - "type": { - "type": "string", - "const": "function_call", - "default": "function_call", - "description": "Tool call type identifier, always \"function_call\"" - }, - "id": { - "type": "string", - "description": "(Optional) Additional identifier for the tool call" - }, - "status": { - "type": "string", - "description": "(Optional) Current status of the function call execution" - } - }, - "additionalProperties": false, - "required": [ - "call_id", - "name", - "arguments", - "type" - ], - "title": "OpenAIResponseOutputMessageFunctionToolCall", - "description": "Function tool call output message for OpenAI responses." 
- }, - "OpenAIResponseOutputMessageWebSearchToolCall": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for this tool call" - }, - "status": { - "type": "string", - "description": "Current status of the web search operation" - }, - "type": { - "type": "string", - "const": "web_search_call", - "default": "web_search_call", - "description": "Tool call type identifier, always \"web_search_call\"" - } - }, - "additionalProperties": false, - "required": [ - "id", - "status", - "type" - ], - "title": "OpenAIResponseOutputMessageWebSearchToolCall", - "description": "Web search tool call output message for OpenAI responses." - }, - "OpenAIResponseText": { - "type": "object", - "properties": { - "format": { - "type": "object", - "properties": { - "type": { - "oneOf": [ - { - "type": "string", - "const": "text" - }, - { - "type": "string", - "const": "json_schema" - }, - { - "type": "string", - "const": "json_object" - } - ], - "description": "Must be \"text\", \"json_schema\", or \"json_object\" to identify the format type" - }, - "name": { - "type": "string", - "description": "The name of the response format. Only used for json_schema." - }, - "schema": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The JSON schema the response should conform to. In a Python SDK, this is often a `pydantic` model. Only used for json_schema." - }, - "description": { - "type": "string", - "description": "(Optional) A description of the response format. Only used for json_schema." - }, - "strict": { - "type": "boolean", - "description": "(Optional) Whether to strictly enforce the JSON schema. If true, the response must match the schema exactly. Only used for json_schema." - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "description": "(Optional) Text format configuration specifying output format requirements" - } - }, - "additionalProperties": false, - "title": "OpenAIResponseText", - "description": "Text response configuration for OpenAI responses." - }, "CreateOpenaiResponseRequest": { "type": "object", "properties": { @@ -9902,26 +7299,6 @@ ], "title": "CreateOpenaiResponseRequest" }, - "OpenAIResponseError": { - "type": "object", - "properties": { - "code": { - "type": "string", - "description": "Error code identifying the type of failure" - }, - "message": { - "type": "string", - "description": "Human-readable error message describing the failure" - } - }, - "additionalProperties": false, - "required": [ - "code", - "message" - ], - "title": "OpenAIResponseError", - "description": "Error details for failed OpenAI response requests." - }, "OpenAIResponseObject": { "type": "object", "properties": { @@ -9998,166 +7375,6 @@ "title": "OpenAIResponseObject", "description": "Complete OpenAI response object containing generation results and metadata." 
}, - "OpenAIResponseOutput": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIResponseMessage" - }, - { - "$ref": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall" - }, - { - "$ref": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall" - }, - { - "$ref": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall" - }, - { - "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPCall" - }, - { - "$ref": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools" - }, - { - "$ref": "#/components/schemas/OpenAIResponseMCPApprovalRequest" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "message": "#/components/schemas/OpenAIResponseMessage", - "web_search_call": "#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall", - "file_search_call": "#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall", - "function_call": "#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall", - "mcp_call": "#/components/schemas/OpenAIResponseOutputMessageMCPCall", - "mcp_list_tools": "#/components/schemas/OpenAIResponseOutputMessageMCPListTools", - "mcp_approval_request": "#/components/schemas/OpenAIResponseMCPApprovalRequest" - } - } - }, - "OpenAIResponseOutputMessageMCPCall": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for this MCP call" - }, - "type": { - "type": "string", - "const": "mcp_call", - "default": "mcp_call", - "description": "Tool call type identifier, always \"mcp_call\"" - }, - "arguments": { - "type": "string", - "description": "JSON string containing the MCP call arguments" - }, - "name": { - "type": "string", - "description": "Name of the MCP method being called" - }, - "server_label": { - "type": "string", - "description": "Label identifying the MCP server handling the call" - }, - "error": { - "type": "string", - "description": "(Optional) Error message if the MCP call failed" - }, - "output": { - "type": "string", - "description": "(Optional) Output result from the successful MCP call" - } - }, - "additionalProperties": false, - "required": [ - "id", - "type", - "arguments", - "name", - "server_label" - ], - "title": "OpenAIResponseOutputMessageMCPCall", - "description": "Model Context Protocol (MCP) call output message for OpenAI responses." 
- }, - "OpenAIResponseOutputMessageMCPListTools": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for this MCP list tools operation" - }, - "type": { - "type": "string", - "const": "mcp_list_tools", - "default": "mcp_list_tools", - "description": "Tool call type identifier, always \"mcp_list_tools\"" - }, - "server_label": { - "type": "string", - "description": "Label identifying the MCP server providing the tools" - }, - "tools": { - "type": "array", - "items": { - "type": "object", - "properties": { - "input_schema": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "JSON schema defining the tool's input parameters" - }, - "name": { - "type": "string", - "description": "Name of the tool" - }, - "description": { - "type": "string", - "description": "(Optional) Description of what the tool does" - } - }, - "additionalProperties": false, - "required": [ - "input_schema", - "name" - ], - "title": "MCPListToolsTool", - "description": "Tool definition returned by MCP list tools operation." - }, - "description": "List of available tools provided by the MCP server" - } - }, - "additionalProperties": false, - "required": [ - "id", - "type", - "server_label", - "tools" - ], - "title": "OpenAIResponseOutputMessageMCPListTools", - "description": "MCP list tools output message containing available tools from an MCP server." - }, "OpenAIResponseContentPartOutputText": { "type": "object", "properties": { @@ -11021,65 +8238,6 @@ ], "title": "OpenAIResponseObjectStreamResponseWebSearchCallSearching" }, - "CreatePromptRequest": { - "type": "object", - "properties": { - "prompt": { - "type": "string", - "description": "The prompt text content with variable placeholders." - }, - "variables": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of variable names that can be used in the prompt template." - } - }, - "additionalProperties": false, - "required": [ - "prompt" - ], - "title": "CreatePromptRequest" - }, - "Prompt": { - "type": "object", - "properties": { - "prompt": { - "type": "string", - "description": "The system prompt text with variable placeholders. Variables are only supported when using the Responses API." - }, - "version": { - "type": "integer", - "description": "Version (integer starting at 1, incremented on save)" - }, - "prompt_id": { - "type": "string", - "description": "Unique identifier formatted as 'pmpt_<48-digit-hash>'" - }, - "variables": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of prompt variable names that can be used in the prompt template" - }, - "is_default": { - "type": "boolean", - "default": false, - "description": "Boolean indicating whether this version is the default version for this prompt" - } - }, - "additionalProperties": false, - "required": [ - "version", - "prompt_id", - "variables", - "is_default" - ], - "title": "Prompt", - "description": "A prompt resource representing a stored OpenAI Compatible prompt template in Llama Stack." - }, "OpenAIDeleteResponseObject": { "type": "object", "properties": { @@ -11108,26 +8266,515 @@ "title": "OpenAIDeleteResponseObject", "description": "Response object confirming deletion of an OpenAI response." 
}, - "AgentCandidate": { + "ListOpenAIResponseInputItem": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/OpenAIResponseInput" + }, + "description": "List of input items" + }, + "object": { + "type": "string", + "const": "list", + "default": "list", + "description": "Object type identifier, always \"list\"" + } + }, + "additionalProperties": false, + "required": [ + "data", + "object" + ], + "title": "ListOpenAIResponseInputItem", + "description": "List container for OpenAI response input items." + }, + "CompletionMessage": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "assistant", + "default": "assistant", + "description": "Must be \"assistant\" to identify this as the model's response" + }, + "content": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "The content of the model's response" + }, + "stop_reason": { + "type": "string", + "enum": [ + "end_of_turn", + "end_of_message", + "out_of_tokens" + ], + "description": "Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: The model finished generating the entire response. - `StopReason.end_of_message`: The model finished generating but generated a partial response -- usually, a tool call. The user may call the tool and continue the conversation with the tool's response. - `StopReason.out_of_tokens`: The model ran out of token budget." + }, + "tool_calls": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolCall" + }, + "description": "List of tool calls. Each tool call is a ToolCall object." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content", + "stop_reason" + ], + "title": "CompletionMessage", + "description": "A message containing the model's (assistant) response in a chat conversation." + }, + "ImageContentItem": { "type": "object", "properties": { "type": { "type": "string", - "const": "agent", - "default": "agent" + "const": "image", + "default": "image", + "description": "Discriminator type of the content item. Always \"image\"" }, - "config": { - "$ref": "#/components/schemas/AgentConfig", - "description": "The configuration for the agent candidate." + "image": { + "type": "object", + "properties": { + "url": { + "$ref": "#/components/schemas/URL", + "description": "A URL of the image or data URL in the format of data:image/{type};base64,{data}. Note that URL could have length limits." + }, + "data": { + "type": "string", + "contentEncoding": "base64", + "description": "base64 encoded image data as string" + } + }, + "additionalProperties": false, + "description": "Image as a base64 encoded string or an URL" } }, "additionalProperties": false, "required": [ "type", - "config" + "image" ], - "title": "AgentCandidate", - "description": "An agent candidate for evaluation." 
+ "title": "ImageContentItem", + "description": "A image content item" + }, + "InterleavedContent": { + "oneOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/InterleavedContentItem" + }, + { + "type": "array", + "items": { + "$ref": "#/components/schemas/InterleavedContentItem" + } + } + ] + }, + "InterleavedContentItem": { + "oneOf": [ + { + "$ref": "#/components/schemas/ImageContentItem" + }, + { + "$ref": "#/components/schemas/TextContentItem" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "image": "#/components/schemas/ImageContentItem", + "text": "#/components/schemas/TextContentItem" + } + } + }, + "Message": { + "oneOf": [ + { + "$ref": "#/components/schemas/UserMessage" + }, + { + "$ref": "#/components/schemas/SystemMessage" + }, + { + "$ref": "#/components/schemas/ToolResponseMessage" + }, + { + "$ref": "#/components/schemas/CompletionMessage" + } + ], + "discriminator": { + "propertyName": "role", + "mapping": { + "user": "#/components/schemas/UserMessage", + "system": "#/components/schemas/SystemMessage", + "tool": "#/components/schemas/ToolResponseMessage", + "assistant": "#/components/schemas/CompletionMessage" + } + } + }, + "SystemMessage": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "system", + "default": "system", + "description": "Must be \"system\" to identify this as a system message" + }, + "content": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "SystemMessage", + "description": "A system message providing instructions or context to the model." + }, + "TextContentItem": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text", + "default": "text", + "description": "Discriminator type of the content item. 
Always \"text\"" + }, + "text": { + "type": "string", + "description": "Text content" + } + }, + "additionalProperties": false, + "required": [ + "type", + "text" + ], + "title": "TextContentItem", + "description": "A text content item" + }, + "ToolCall": { + "type": "object", + "properties": { + "call_id": { + "type": "string" + }, + "tool_name": { + "oneOf": [ + { + "type": "string", + "enum": [ + "brave_search", + "wolfram_alpha", + "photogen", + "code_interpreter" + ], + "title": "BuiltinTool" + }, + { + "type": "string" + } + ] + }, + "arguments": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "null" + }, + { + "type": "array", + "items": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + }, + { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "type": "boolean" + }, + { + "type": "null" + } + ] + } + } + ] + } + } + ] + }, + "arguments_json": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "call_id", + "tool_name", + "arguments" + ], + "title": "ToolCall" + }, + "ToolResponseMessage": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "tool", + "default": "tool", + "description": "Must be \"tool\" to identify this as a tool response" + }, + "call_id": { + "type": "string", + "description": "Unique identifier for the tool call this response is for" + }, + "content": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "The response content from the tool" + } + }, + "additionalProperties": false, + "required": [ + "role", + "call_id", + "content" + ], + "title": "ToolResponseMessage", + "description": "A message representing the result of a tool invocation." + }, + "URL": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The URL string pointing to the resource" + } + }, + "additionalProperties": false, + "required": [ + "uri" + ], + "title": "URL", + "description": "A URL reference to external content." + }, + "UserMessage": { + "type": "object", + "properties": { + "role": { + "type": "string", + "const": "user", + "default": "user", + "description": "Must be \"user\" to identify this as a user message" + }, + "content": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "The content of the message, which can include text and other media" + }, + "context": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "(Optional) This field is used internally by Llama Stack to pass RAG context. This field may be removed in the API in the future." + } + }, + "additionalProperties": false, + "required": [ + "role", + "content" + ], + "title": "UserMessage", + "description": "A message from the user in a chat conversation." + }, + "RunShieldRequest": { + "type": "object", + "properties": { + "shield_id": { + "type": "string", + "description": "The identifier of the shield to run." + }, + "messages": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Message" + }, + "description": "The messages to run the shield on." 
+ }, + "params": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "The parameters of the shield." + } + }, + "additionalProperties": false, + "required": [ + "shield_id", + "messages", + "params" + ], + "title": "RunShieldRequest" + }, + "RunShieldResponse": { + "type": "object", + "properties": { + "violation": { + "$ref": "#/components/schemas/SafetyViolation", + "description": "(Optional) Safety violation detected by the shield, if any" + } + }, + "additionalProperties": false, + "title": "RunShieldResponse", + "description": "Response from running a safety shield." + }, + "SafetyViolation": { + "type": "object", + "properties": { + "violation_level": { + "$ref": "#/components/schemas/ViolationLevel", + "description": "Severity level of the violation" + }, + "user_message": { + "type": "string", + "description": "(Optional) Message to convey to the user about the violation" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Additional metadata including specific violation codes for debugging and telemetry" + } + }, + "additionalProperties": false, + "required": [ + "violation_level", + "metadata" + ], + "title": "SafetyViolation", + "description": "Details of a safety violation detected by content moderation." + }, + "ViolationLevel": { + "type": "string", + "enum": [ + "info", + "warn", + "error" + ], + "title": "ViolationLevel", + "description": "Severity level of a safety violation." + }, + "AgentTurnInputType": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "agent_turn_input", + "default": "agent_turn_input", + "description": "Discriminator type. Always \"agent_turn_input\"" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "AgentTurnInputType", + "description": "Parameter type for agent turn input." }, "AggregationFunctionType": { "type": "string", @@ -11141,6 +8788,23 @@ "title": "AggregationFunctionType", "description": "Types of aggregation functions for scoring results." }, + "ArrayType": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "array", + "default": "array", + "description": "Discriminator type. Always \"array\"" + } + }, + "additionalProperties": false, + "required": [ + "type" + ], + "title": "ArrayType", + "description": "Parameter type for array values." + }, "BasicScoringFnParams": { "type": "object", "properties": { @@ -11166,1386 +8830,6 @@ "title": "BasicScoringFnParams", "description": "Parameters for basic scoring function configuration." }, - "BenchmarkConfig": { - "type": "object", - "properties": { - "eval_candidate": { - "oneOf": [ - { - "$ref": "#/components/schemas/ModelCandidate" - }, - { - "$ref": "#/components/schemas/AgentCandidate" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "model": "#/components/schemas/ModelCandidate", - "agent": "#/components/schemas/AgentCandidate" - } - }, - "description": "The candidate to evaluate." 
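For orientation (not part of the diff), a request body conforming to the `RunShieldRequest` schema added above could look like this; the shield identifier and message text are placeholders.

```json
{
  "shield_id": "content-safety",
  "messages": [
    {
      "role": "user",
      "content": "How do I pick a lock?"
    }
  ],
  "params": {}
}
```

A matching `RunShieldResponse` is either empty (no violation) or carries a `SafetyViolation`, for example (violation details are placeholders):

```json
{
  "violation": {
    "violation_level": "error",
    "user_message": "I can't help with that request.",
    "metadata": {
      "violation_type": "illegal_activity"
    }
  }
}
```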
- }, - "scoring_params": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ScoringFnParams" - }, - "description": "Map between scoring function id and parameters for each scoring function you want to run" - }, - "num_examples": { - "type": "integer", - "description": "(Optional) The number of examples to evaluate. If not provided, all examples in the dataset will be evaluated" - } - }, - "additionalProperties": false, - "required": [ - "eval_candidate", - "scoring_params" - ], - "title": "BenchmarkConfig", - "description": "A benchmark configuration for evaluation." - }, - "LLMAsJudgeScoringFnParams": { - "type": "object", - "properties": { - "type": { - "$ref": "#/components/schemas/ScoringFnParamsType", - "const": "llm_as_judge", - "default": "llm_as_judge", - "description": "The type of scoring function parameters, always llm_as_judge" - }, - "judge_model": { - "type": "string", - "description": "Identifier of the LLM model to use as a judge for scoring" - }, - "prompt_template": { - "type": "string", - "description": "(Optional) Custom prompt template for the judge model" - }, - "judge_score_regexes": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Regexes to extract the answer from generated response" - }, - "aggregation_functions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AggregationFunctionType" - }, - "description": "Aggregation functions to apply to the scores of each row" - } - }, - "additionalProperties": false, - "required": [ - "type", - "judge_model", - "judge_score_regexes", - "aggregation_functions" - ], - "title": "LLMAsJudgeScoringFnParams", - "description": "Parameters for LLM-as-judge scoring function configuration." - }, - "ModelCandidate": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "model", - "default": "model" - }, - "model": { - "type": "string", - "description": "The model ID to evaluate." - }, - "sampling_params": { - "$ref": "#/components/schemas/SamplingParams", - "description": "The sampling parameters for the model." - }, - "system_message": { - "$ref": "#/components/schemas/SystemMessage", - "description": "(Optional) The system message providing instructions or context to the model." - } - }, - "additionalProperties": false, - "required": [ - "type", - "model", - "sampling_params" - ], - "title": "ModelCandidate", - "description": "A model candidate for evaluation." - }, - "RegexParserScoringFnParams": { - "type": "object", - "properties": { - "type": { - "$ref": "#/components/schemas/ScoringFnParamsType", - "const": "regex_parser", - "default": "regex_parser", - "description": "The type of scoring function parameters, always regex_parser" - }, - "parsing_regexes": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Regex to extract the answer from generated response" - }, - "aggregation_functions": { - "type": "array", - "items": { - "$ref": "#/components/schemas/AggregationFunctionType" - }, - "description": "Aggregation functions to apply to the scores of each row" - } - }, - "additionalProperties": false, - "required": [ - "type", - "parsing_regexes", - "aggregation_functions" - ], - "title": "RegexParserScoringFnParams", - "description": "Parameters for regex parser scoring function configuration." 
- }, - "ScoringFnParams": { - "oneOf": [ - { - "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" - }, - { - "$ref": "#/components/schemas/RegexParserScoringFnParams" - }, - { - "$ref": "#/components/schemas/BasicScoringFnParams" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", - "regex_parser": "#/components/schemas/RegexParserScoringFnParams", - "basic": "#/components/schemas/BasicScoringFnParams" - } - } - }, - "ScoringFnParamsType": { - "type": "string", - "enum": [ - "llm_as_judge", - "regex_parser", - "basic" - ], - "title": "ScoringFnParamsType", - "description": "Types of scoring function parameter configurations." - }, - "SystemMessage": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "system", - "default": "system", - "description": "Must be \"system\" to identify this as a system message" - }, - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "SystemMessage", - "description": "A system message providing instructions or context to the model." - }, - "EvaluateRowsRequest": { - "type": "object", - "properties": { - "input_rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "The rows to evaluate." - }, - "scoring_functions": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The scoring functions to use for the evaluation." - }, - "benchmark_config": { - "$ref": "#/components/schemas/BenchmarkConfig", - "description": "The configuration for the benchmark." - } - }, - "additionalProperties": false, - "required": [ - "input_rows", - "scoring_functions", - "benchmark_config" - ], - "title": "EvaluateRowsRequest" - }, - "EvaluateResponse": { - "type": "object", - "properties": { - "generations": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "The generations from the evaluation." - }, - "scores": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ScoringResult" - }, - "description": "The scores from the evaluation." - } - }, - "additionalProperties": false, - "required": [ - "generations", - "scores" - ], - "title": "EvaluateResponse", - "description": "The response from an evaluation." - }, - "ScoringResult": { - "type": "object", - "properties": { - "score_rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "The scoring result for each row. Each row is a map of column name to value." 
- }, - "aggregated_results": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Map of metric name to aggregated value" - } - }, - "additionalProperties": false, - "required": [ - "score_rows", - "aggregated_results" - ], - "title": "ScoringResult", - "description": "A scoring result for a single row." - }, - "Agent": { - "type": "object", - "properties": { - "agent_id": { - "type": "string", - "description": "Unique identifier for the agent" - }, - "agent_config": { - "$ref": "#/components/schemas/AgentConfig", - "description": "Configuration settings for the agent" - }, - "created_at": { - "type": "string", - "format": "date-time", - "description": "Timestamp when the agent was created" - } - }, - "additionalProperties": false, - "required": [ - "agent_id", - "agent_config", - "created_at" - ], - "title": "Agent", - "description": "An agent instance with configuration and metadata." - }, - "Session": { - "type": "object", - "properties": { - "session_id": { - "type": "string", - "description": "Unique identifier for the conversation session" - }, - "session_name": { - "type": "string", - "description": "Human-readable name for the session" - }, - "turns": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Turn" - }, - "description": "List of all turns that have occurred in this session" - }, - "started_at": { - "type": "string", - "format": "date-time", - "description": "Timestamp when the session was created" - } - }, - "additionalProperties": false, - "required": [ - "session_id", - "session_name", - "turns", - "started_at" - ], - "title": "Session", - "description": "A single session of an interaction with an Agentic System." - }, - "AgentStepResponse": { - "type": "object", - "properties": { - "step": { - "oneOf": [ - { - "$ref": "#/components/schemas/InferenceStep" - }, - { - "$ref": "#/components/schemas/ToolExecutionStep" - }, - { - "$ref": "#/components/schemas/ShieldCallStep" - }, - { - "$ref": "#/components/schemas/MemoryRetrievalStep" - } - ], - "discriminator": { - "propertyName": "step_type", - "mapping": { - "inference": "#/components/schemas/InferenceStep", - "tool_execution": "#/components/schemas/ToolExecutionStep", - "shield_call": "#/components/schemas/ShieldCallStep", - "memory_retrieval": "#/components/schemas/MemoryRetrievalStep" - } - }, - "description": "The complete step data and execution details" - } - }, - "additionalProperties": false, - "required": [ - "step" - ], - "title": "AgentStepResponse", - "description": "Response containing details of a specific agent step." 
- }, - "Benchmark": { - "type": "object", - "properties": { - "identifier": { - "type": "string" - }, - "provider_resource_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "model", - "shield", - "vector_db", - "dataset", - "scoring_function", - "benchmark", - "tool", - "tool_group", - "prompt" - ], - "const": "benchmark", - "default": "benchmark", - "description": "The resource type, always benchmark" - }, - "dataset_id": { - "type": "string", - "description": "Identifier of the dataset to use for the benchmark evaluation" - }, - "scoring_functions": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of scoring function identifiers to apply during evaluation" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Metadata for this evaluation task" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_id", - "type", - "dataset_id", - "scoring_functions", - "metadata" - ], - "title": "Benchmark", - "description": "A benchmark resource for evaluating model performance." - }, - "OpenAIAssistantMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "assistant", - "default": "assistant", - "description": "Must be \"assistant\" to identify this as the model's response" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - } - } - ], - "description": "The content of the model's response" - }, - "name": { - "type": "string", - "description": "(Optional) The name of the assistant message participant." - }, - "tool_calls": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" - }, - "description": "List of tool calls. Each tool call is an OpenAIChatCompletionToolCall object." - } - }, - "additionalProperties": false, - "required": [ - "role" - ], - "title": "OpenAIAssistantMessageParam", - "description": "A message containing the model's (assistant) response in an OpenAI-compatible chat completion request." - }, - "OpenAIChatCompletionContentPartImageParam": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "image_url", - "default": "image_url", - "description": "Must be \"image_url\" to identify this as image content" - }, - "image_url": { - "$ref": "#/components/schemas/OpenAIImageURL", - "description": "Image URL specification and processing details" - } - }, - "additionalProperties": false, - "required": [ - "type", - "image_url" - ], - "title": "OpenAIChatCompletionContentPartImageParam", - "description": "Image content part for OpenAI-compatible chat completion messages." 
- }, - "OpenAIChatCompletionContentPartParam": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - }, - { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" - }, - { - "$ref": "#/components/schemas/OpenAIFile" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "text": "#/components/schemas/OpenAIChatCompletionContentPartTextParam", - "image_url": "#/components/schemas/OpenAIChatCompletionContentPartImageParam", - "file": "#/components/schemas/OpenAIFile" - } - } - }, - "OpenAIChatCompletionContentPartTextParam": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "text", - "default": "text", - "description": "Must be \"text\" to identify this as text content" - }, - "text": { - "type": "string", - "description": "The text content of the message" - } - }, - "additionalProperties": false, - "required": [ - "type", - "text" - ], - "title": "OpenAIChatCompletionContentPartTextParam", - "description": "Text content part for OpenAI-compatible chat completion messages." - }, - "OpenAIChatCompletionToolCall": { - "type": "object", - "properties": { - "index": { - "type": "integer", - "description": "(Optional) Index of the tool call in the list" - }, - "id": { - "type": "string", - "description": "(Optional) Unique identifier for the tool call" - }, - "type": { - "type": "string", - "const": "function", - "default": "function", - "description": "Must be \"function\" to identify this as a function call" - }, - "function": { - "$ref": "#/components/schemas/OpenAIChatCompletionToolCallFunction", - "description": "(Optional) Function call details" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "OpenAIChatCompletionToolCall", - "description": "Tool call specification for OpenAI-compatible chat completion responses." - }, - "OpenAIChatCompletionToolCallFunction": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "(Optional) Name of the function to call" - }, - "arguments": { - "type": "string", - "description": "(Optional) Arguments to pass to the function as a JSON string" - } - }, - "additionalProperties": false, - "title": "OpenAIChatCompletionToolCallFunction", - "description": "Function call details for OpenAI-compatible tool calls." 
- }, - "OpenAIChoice": { - "type": "object", - "properties": { - "message": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIUserMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAISystemMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIAssistantMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIToolMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" - } - ], - "discriminator": { - "propertyName": "role", - "mapping": { - "user": "#/components/schemas/OpenAIUserMessageParam", - "system": "#/components/schemas/OpenAISystemMessageParam", - "assistant": "#/components/schemas/OpenAIAssistantMessageParam", - "tool": "#/components/schemas/OpenAIToolMessageParam", - "developer": "#/components/schemas/OpenAIDeveloperMessageParam" - } - }, - "description": "The message from the model" - }, - "finish_reason": { - "type": "string", - "description": "The reason the model stopped generating" - }, - "index": { - "type": "integer", - "description": "The index of the choice" - }, - "logprobs": { - "$ref": "#/components/schemas/OpenAIChoiceLogprobs", - "description": "(Optional) The log probabilities for the tokens in the message" - } - }, - "additionalProperties": false, - "required": [ - "message", - "finish_reason", - "index" - ], - "title": "OpenAIChoice", - "description": "A choice from an OpenAI-compatible chat completion response." - }, - "OpenAIChoiceLogprobs": { - "type": "object", - "properties": { - "content": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAITokenLogProb" - }, - "description": "(Optional) The log probabilities for the tokens in the message" - }, - "refusal": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAITokenLogProb" - }, - "description": "(Optional) The log probabilities for the tokens in the message" - } - }, - "additionalProperties": false, - "title": "OpenAIChoiceLogprobs", - "description": "The log probabilities for the tokens in the message from an OpenAI-compatible chat completion response." - }, - "OpenAIDeveloperMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "developer", - "default": "developer", - "description": "Must be \"developer\" to identify this as a developer message" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - } - } - ], - "description": "The content of the developer message" - }, - "name": { - "type": "string", - "description": "(Optional) The name of the developer message participant." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "OpenAIDeveloperMessageParam", - "description": "A message from the developer in an OpenAI-compatible chat completion request." 
- }, - "OpenAIFile": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "file", - "default": "file" - }, - "file": { - "$ref": "#/components/schemas/OpenAIFileFile" - } - }, - "additionalProperties": false, - "required": [ - "type", - "file" - ], - "title": "OpenAIFile" - }, - "OpenAIFileFile": { - "type": "object", - "properties": { - "file_data": { - "type": "string" - }, - "file_id": { - "type": "string" - }, - "filename": { - "type": "string" - } - }, - "additionalProperties": false, - "title": "OpenAIFileFile" - }, - "OpenAIImageURL": { - "type": "object", - "properties": { - "url": { - "type": "string", - "description": "URL of the image to include in the message" - }, - "detail": { - "type": "string", - "description": "(Optional) Level of detail for image processing. Can be \"low\", \"high\", or \"auto\"" - } - }, - "additionalProperties": false, - "required": [ - "url" - ], - "title": "OpenAIImageURL", - "description": "Image URL specification for OpenAI-compatible chat completion messages." - }, - "OpenAIMessageParam": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIUserMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAISystemMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIAssistantMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIToolMessageParam" - }, - { - "$ref": "#/components/schemas/OpenAIDeveloperMessageParam" - } - ], - "discriminator": { - "propertyName": "role", - "mapping": { - "user": "#/components/schemas/OpenAIUserMessageParam", - "system": "#/components/schemas/OpenAISystemMessageParam", - "assistant": "#/components/schemas/OpenAIAssistantMessageParam", - "tool": "#/components/schemas/OpenAIToolMessageParam", - "developer": "#/components/schemas/OpenAIDeveloperMessageParam" - } - } - }, - "OpenAISystemMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "system", - "default": "system", - "description": "Must be \"system\" to identify this as a system message" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - } - } - ], - "description": "The content of the \"system prompt\". If multiple system messages are provided, they are concatenated. The underlying Llama Stack code may also add other system messages (for example, for formatting tool definitions)." - }, - "name": { - "type": "string", - "description": "(Optional) The name of the system message participant." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "OpenAISystemMessageParam", - "description": "A system message providing instructions or context to the model." - }, - "OpenAITokenLogProb": { - "type": "object", - "properties": { - "token": { - "type": "string" - }, - "bytes": { - "type": "array", - "items": { - "type": "integer" - } - }, - "logprob": { - "type": "number" - }, - "top_logprobs": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAITopLogProb" - } - } - }, - "additionalProperties": false, - "required": [ - "token", - "logprob", - "top_logprobs" - ], - "title": "OpenAITokenLogProb", - "description": "The log probability for a token from an OpenAI-compatible chat completion response." 
- }, - "OpenAIToolMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "tool", - "default": "tool", - "description": "Must be \"tool\" to identify this as a tool response" - }, - "tool_call_id": { - "type": "string", - "description": "Unique identifier for the tool call this response is for" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - } - } - ], - "description": "The response content from the tool" - } - }, - "additionalProperties": false, - "required": [ - "role", - "tool_call_id", - "content" - ], - "title": "OpenAIToolMessageParam", - "description": "A message representing the result of a tool invocation in an OpenAI-compatible chat completion request." - }, - "OpenAITopLogProb": { - "type": "object", - "properties": { - "token": { - "type": "string" - }, - "bytes": { - "type": "array", - "items": { - "type": "integer" - } - }, - "logprob": { - "type": "number" - } - }, - "additionalProperties": false, - "required": [ - "token", - "logprob" - ], - "title": "OpenAITopLogProb", - "description": "The top log probability for a token from an OpenAI-compatible chat completion response." - }, - "OpenAIUserMessageParam": { - "type": "object", - "properties": { - "role": { - "type": "string", - "const": "user", - "default": "user", - "description": "Must be \"user\" to identify this as a user message" - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartParam" - } - } - ], - "description": "The content of the message, which can include text and other media" - }, - "name": { - "type": "string", - "description": "(Optional) The name of the user message participant." - } - }, - "additionalProperties": false, - "required": [ - "role", - "content" - ], - "title": "OpenAIUserMessageParam", - "description": "A message from the user in an OpenAI-compatible chat completion request." 
- }, - "OpenAICompletionWithInputMessages": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The ID of the chat completion" - }, - "choices": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChoice" - }, - "description": "List of choices" - }, - "object": { - "type": "string", - "const": "chat.completion", - "default": "chat.completion", - "description": "The object type, which will be \"chat.completion\"" - }, - "created": { - "type": "integer", - "description": "The Unix timestamp in seconds when the chat completion was created" - }, - "model": { - "type": "string", - "description": "The model that was used to generate the chat completion" - }, - "input_messages": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIMessageParam" - } - } - }, - "additionalProperties": false, - "required": [ - "id", - "choices", - "object", - "created", - "model", - "input_messages" - ], - "title": "OpenAICompletionWithInputMessages" - }, - "Dataset": { - "type": "object", - "properties": { - "identifier": { - "type": "string" - }, - "provider_resource_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "model", - "shield", - "vector_db", - "dataset", - "scoring_function", - "benchmark", - "tool", - "tool_group", - "prompt" - ], - "const": "dataset", - "default": "dataset", - "description": "Type of resource, always 'dataset' for datasets" - }, - "purpose": { - "type": "string", - "enum": [ - "post-training/messages", - "eval/question-answer", - "eval/messages-answer" - ], - "description": "Purpose of the dataset indicating its intended use" - }, - "source": { - "oneOf": [ - { - "$ref": "#/components/schemas/URIDataSource" - }, - { - "$ref": "#/components/schemas/RowsDataSource" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "uri": "#/components/schemas/URIDataSource", - "rows": "#/components/schemas/RowsDataSource" - } - }, - "description": "Data source configuration for the dataset" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Additional metadata for the dataset" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_id", - "type", - "purpose", - "source", - "metadata" - ], - "title": "Dataset", - "description": "Dataset resource for storing and accessing training or evaluation data." - }, - "RowsDataSource": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "rows", - "default": "rows" - }, - "rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "The dataset is stored in rows. E.g. - [ {\"messages\": [{\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}]} ]" - } - }, - "additionalProperties": false, - "required": [ - "type", - "rows" - ], - "title": "RowsDataSource", - "description": "A dataset stored in rows." 
- }, - "URIDataSource": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "uri", - "default": "uri" - }, - "uri": { - "type": "string", - "description": "The dataset can be obtained from a URI. E.g. - \"https://mywebsite.com/mydata.jsonl\" - \"lsfs://mydata.jsonl\" - \"data:csv;base64,{base64_content}\"" - } - }, - "additionalProperties": false, - "required": [ - "type", - "uri" - ], - "title": "URIDataSource", - "description": "A dataset that can be obtained from a URI." - }, - "Model": { - "type": "object", - "properties": { - "identifier": { - "type": "string", - "description": "Unique identifier for this resource in llama stack" - }, - "provider_resource_id": { - "type": "string", - "description": "Unique identifier for this resource in the provider" - }, - "provider_id": { - "type": "string", - "description": "ID of the provider that owns this resource" - }, - "type": { - "type": "string", - "enum": [ - "model", - "shield", - "vector_db", - "dataset", - "scoring_function", - "benchmark", - "tool", - "tool_group", - "prompt" - ], - "const": "model", - "default": "model", - "description": "The resource type, always 'model' for model resources" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Any additional metadata for this model" - }, - "model_type": { - "$ref": "#/components/schemas/ModelType", - "default": "llm", - "description": "The type of model (LLM or embedding model)" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_id", - "type", - "metadata", - "model_type" - ], - "title": "Model", - "description": "A model resource representing an AI model registered in Llama Stack." - }, - "ModelType": { - "type": "string", - "enum": [ - "llm", - "embedding" - ], - "title": "ModelType", - "description": "Enumeration of supported model types in Llama Stack." - }, - "AgentTurnInputType": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "agent_turn_input", - "default": "agent_turn_input", - "description": "Discriminator type. Always \"agent_turn_input\"" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "AgentTurnInputType", - "description": "Parameter type for agent turn input." - }, - "ArrayType": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "array", - "default": "array", - "description": "Discriminator type. Always \"array\"" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "ArrayType", - "description": "Parameter type for array values." - }, "BooleanType": { "type": "object", "properties": { @@ -12614,6 +8898,48 @@ "title": "JsonType", "description": "Parameter type for JSON values." 
}, + "LLMAsJudgeScoringFnParams": { + "type": "object", + "properties": { + "type": { + "$ref": "#/components/schemas/ScoringFnParamsType", + "const": "llm_as_judge", + "default": "llm_as_judge", + "description": "The type of scoring function parameters, always llm_as_judge" + }, + "judge_model": { + "type": "string", + "description": "Identifier of the LLM model to use as a judge for scoring" + }, + "prompt_template": { + "type": "string", + "description": "(Optional) Custom prompt template for the judge model" + }, + "judge_score_regexes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Regexes to extract the answer from generated response" + }, + "aggregation_functions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "additionalProperties": false, + "required": [ + "type", + "judge_model", + "judge_score_regexes", + "aggregation_functions" + ], + "title": "LLMAsJudgeScoringFnParams", + "description": "Parameters for LLM-as-judge scoring function configuration." + }, "NumberType": { "type": "object", "properties": { @@ -12648,6 +8974,39 @@ "title": "ObjectType", "description": "Parameter type for object values." }, + "RegexParserScoringFnParams": { + "type": "object", + "properties": { + "type": { + "$ref": "#/components/schemas/ScoringFnParamsType", + "const": "regex_parser", + "default": "regex_parser", + "description": "The type of scoring function parameters, always regex_parser" + }, + "parsing_regexes": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Regex to extract the answer from generated response" + }, + "aggregation_functions": { + "type": "array", + "items": { + "$ref": "#/components/schemas/AggregationFunctionType" + }, + "description": "Aggregation functions to apply to the scores of each row" + } + }, + "additionalProperties": false, + "required": [ + "type", + "parsing_regexes", + "aggregation_functions" + ], + "title": "RegexParserScoringFnParams", + "description": "Parameters for regex parser scoring function configuration." + }, "ScoringFn": { "type": "object", "properties": { @@ -12769,6 +9128,37 @@ "title": "ScoringFn", "description": "A scoring function resource for evaluating model outputs." }, + "ScoringFnParams": { + "oneOf": [ + { + "$ref": "#/components/schemas/LLMAsJudgeScoringFnParams" + }, + { + "$ref": "#/components/schemas/RegexParserScoringFnParams" + }, + { + "$ref": "#/components/schemas/BasicScoringFnParams" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "llm_as_judge": "#/components/schemas/LLMAsJudgeScoringFnParams", + "regex_parser": "#/components/schemas/RegexParserScoringFnParams", + "basic": "#/components/schemas/BasicScoringFnParams" + } + } + }, + "ScoringFnParamsType": { + "type": "string", + "enum": [ + "llm_as_judge", + "regex_parser", + "basic" + ], + "title": "ScoringFnParamsType", + "description": "Types of scoring function parameter configurations." + }, "StringType": { "type": "object", "properties": { @@ -12803,6 +9193,302 @@ "title": "UnionType", "description": "Parameter type for union values." 
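As an illustrative aside (not part of the diff), a `ScoringFnParams` value using the `llm_as_judge` variant re-added above might look like the following; the judge model identifier, prompt template, and the `average` aggregation function name are placeholders, since the `AggregationFunctionType` members are elided from this hunk.

```json
{
  "type": "llm_as_judge",
  "judge_model": "meta-llama/Llama-3.3-70B-Instruct",
  "prompt_template": "Rate the answer from 1 to 5: {answer}",
  "judge_score_regexes": ["Score: (\\d+)"],
  "aggregation_functions": ["average"]
}
```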
}, + "ListScoringFunctionsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ScoringFn" + } + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListScoringFunctionsResponse" + }, + "ParamType": { + "oneOf": [ + { + "$ref": "#/components/schemas/StringType" + }, + { + "$ref": "#/components/schemas/NumberType" + }, + { + "$ref": "#/components/schemas/BooleanType" + }, + { + "$ref": "#/components/schemas/ArrayType" + }, + { + "$ref": "#/components/schemas/ObjectType" + }, + { + "$ref": "#/components/schemas/JsonType" + }, + { + "$ref": "#/components/schemas/UnionType" + }, + { + "$ref": "#/components/schemas/ChatCompletionInputType" + }, + { + "$ref": "#/components/schemas/CompletionInputType" + }, + { + "$ref": "#/components/schemas/AgentTurnInputType" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "string": "#/components/schemas/StringType", + "number": "#/components/schemas/NumberType", + "boolean": "#/components/schemas/BooleanType", + "array": "#/components/schemas/ArrayType", + "object": "#/components/schemas/ObjectType", + "json": "#/components/schemas/JsonType", + "union": "#/components/schemas/UnionType", + "chat_completion_input": "#/components/schemas/ChatCompletionInputType", + "completion_input": "#/components/schemas/CompletionInputType", + "agent_turn_input": "#/components/schemas/AgentTurnInputType" + } + } + }, + "RegisterScoringFunctionRequest": { + "type": "object", + "properties": { + "scoring_fn_id": { + "type": "string", + "description": "The ID of the scoring function to register." + }, + "description": { + "type": "string", + "description": "The description of the scoring function." + }, + "return_type": { + "$ref": "#/components/schemas/ParamType", + "description": "The return type of the scoring function." + }, + "provider_scoring_fn_id": { + "type": "string", + "description": "The ID of the provider scoring function to use for the scoring function." + }, + "provider_id": { + "type": "string", + "description": "The ID of the provider to use for the scoring function." + }, + "params": { + "$ref": "#/components/schemas/ScoringFnParams", + "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval." + } + }, + "additionalProperties": false, + "required": [ + "scoring_fn_id", + "description", + "return_type" + ], + "title": "RegisterScoringFunctionRequest" + }, + "ScoreRequest": { + "type": "object", + "properties": { + "input_rows": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "description": "The rows to score." + }, + "scoring_functions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/ScoringFnParams" + }, + { + "type": "null" + } + ] + }, + "description": "The scoring functions to use for the scoring." + } + }, + "additionalProperties": false, + "required": [ + "input_rows", + "scoring_functions" + ], + "title": "ScoreRequest" + }, + "ScoreResponse": { + "type": "object", + "properties": { + "results": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "description": "A map of scoring function name to ScoringResult." 
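To illustrate the `ScoreRequest` shape above (an aside, not part of the diff), a minimal body might look like this; the scoring function identifiers, row fields, and judge model are placeholders. Note that each entry in `scoring_functions` maps a function id to either a `ScoringFnParams` object or `null`.

```json
{
  "input_rows": [
    {
      "input_query": "What is 2 + 2?",
      "generated_answer": "4",
      "expected_answer": "4"
    }
  ],
  "scoring_functions": {
    "basic::equality": null,
    "llm-as-judge::base": {
      "type": "llm_as_judge",
      "judge_model": "meta-llama/Llama-3.3-70B-Instruct",
      "judge_score_regexes": ["Score: (\\d+)"],
      "aggregation_functions": ["average"]
    }
  }
}
```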
+ } + }, + "additionalProperties": false, + "required": [ + "results" + ], + "title": "ScoreResponse", + "description": "The response from scoring." + }, + "ScoringResult": { + "type": "object", + "properties": { + "score_rows": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + } + }, + "description": "The scoring result for each row. Each row is a map of column name to value." + }, + "aggregated_results": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Map of metric name to aggregated value" + } + }, + "additionalProperties": false, + "required": [ + "score_rows", + "aggregated_results" + ], + "title": "ScoringResult", + "description": "A scoring result for a single row." + }, + "ScoreBatchRequest": { + "type": "object", + "properties": { + "dataset_id": { + "type": "string", + "description": "The ID of the dataset to score." + }, + "scoring_functions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/components/schemas/ScoringFnParams" + }, + { + "type": "null" + } + ] + }, + "description": "The scoring functions to use for the scoring." + }, + "save_results_dataset": { + "type": "boolean", + "description": "Whether to save the results to a dataset." + } + }, + "additionalProperties": false, + "required": [ + "dataset_id", + "scoring_functions", + "save_results_dataset" + ], + "title": "ScoreBatchRequest" + }, + "ScoreBatchResponse": { + "type": "object", + "properties": { + "dataset_id": { + "type": "string", + "description": "(Optional) The identifier of the dataset that was scored" + }, + "results": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/ScoringResult" + }, + "description": "A map of scoring function name to ScoringResult" + } + }, + "additionalProperties": false, + "required": [ + "results" + ], + "title": "ScoreBatchResponse", + "description": "Response from batch scoring operations on datasets." + }, "Shield": { "type": "object", "properties": { @@ -12868,556 +9554,13 @@ "title": "Shield", "description": "A safety shield resource that can be used to check content." 
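Analogously, an illustrative `ScoreBatchRequest` (dataset and function identifiers are placeholders, not from the diff):

```json
{
  "dataset_id": "my-eval-dataset",
  "scoring_functions": {
    "basic::equality": null
  },
  "save_results_dataset": false
}
```

And the shape of a `ScoreBatchResponse` carrying one `ScoringResult`; the `score` and `accuracy` keys are placeholder column and metric names.

```json
{
  "dataset_id": "my-eval-dataset",
  "results": {
    "basic::equality": {
      "score_rows": [
        { "score": 1.0 }
      ],
      "aggregated_results": {
        "accuracy": 1.0
      }
    }
  }
}
```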
}, - "Span": { - "type": "object", - "properties": { - "span_id": { - "type": "string", - "description": "Unique identifier for the span" - }, - "trace_id": { - "type": "string", - "description": "Unique identifier for the trace this span belongs to" - }, - "parent_span_id": { - "type": "string", - "description": "(Optional) Unique identifier for the parent span, if this is a child span" - }, - "name": { - "type": "string", - "description": "Human-readable name describing the operation this span represents" - }, - "start_time": { - "type": "string", - "format": "date-time", - "description": "Timestamp when the operation began" - }, - "end_time": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the operation finished, if completed" - }, - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Key-value pairs containing additional metadata about the span" - } - }, - "additionalProperties": false, - "required": [ - "span_id", - "trace_id", - "name", - "start_time" - ], - "title": "Span", - "description": "A span representing a single operation within a trace." - }, - "GetSpanTreeRequest": { - "type": "object", - "properties": { - "attributes_to_return": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The attributes to return in the tree." - }, - "max_depth": { - "type": "integer", - "description": "The maximum depth of the tree." - } - }, - "additionalProperties": false, - "title": "GetSpanTreeRequest" - }, - "SpanStatus": { - "type": "string", - "enum": [ - "ok", - "error" - ], - "title": "SpanStatus", - "description": "The status of a span indicating whether it completed successfully or with an error." - }, - "SpanWithStatus": { - "type": "object", - "properties": { - "span_id": { - "type": "string", - "description": "Unique identifier for the span" - }, - "trace_id": { - "type": "string", - "description": "Unique identifier for the trace this span belongs to" - }, - "parent_span_id": { - "type": "string", - "description": "(Optional) Unique identifier for the parent span, if this is a child span" - }, - "name": { - "type": "string", - "description": "Human-readable name describing the operation this span represents" - }, - "start_time": { - "type": "string", - "format": "date-time", - "description": "Timestamp when the operation began" - }, - "end_time": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the operation finished, if completed" - }, - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Key-value pairs containing additional metadata about the span" - }, - "status": { - "$ref": "#/components/schemas/SpanStatus", - "description": "(Optional) The current status of the span" - } - }, - "additionalProperties": false, - "required": [ - "span_id", - "trace_id", - "name", - "start_time" - ], - "title": "SpanWithStatus", - "description": "A span that includes status information." 
- }, - "QuerySpanTreeResponse": { - "type": "object", - "properties": { - "data": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/SpanWithStatus" - }, - "description": "Dictionary mapping span IDs to spans with status information" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "QuerySpanTreeResponse", - "description": "Response containing a tree structure of spans." - }, - "Tool": { - "type": "object", - "properties": { - "identifier": { - "type": "string" - }, - "provider_resource_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "model", - "shield", - "vector_db", - "dataset", - "scoring_function", - "benchmark", - "tool", - "tool_group", - "prompt" - ], - "const": "tool", - "default": "tool", - "description": "Type of resource, always 'tool'" - }, - "toolgroup_id": { - "type": "string", - "description": "ID of the tool group this tool belongs to" - }, - "description": { - "type": "string", - "description": "Human-readable description of what the tool does" - }, - "parameters": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolParameter" - }, - "description": "List of parameters this tool accepts" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Additional metadata about the tool" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_id", - "type", - "toolgroup_id", - "description", - "parameters" - ], - "title": "Tool", - "description": "A tool that can be invoked by agents." - }, - "ToolGroup": { - "type": "object", - "properties": { - "identifier": { - "type": "string" - }, - "provider_resource_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "type": { - "type": "string", - "enum": [ - "model", - "shield", - "vector_db", - "dataset", - "scoring_function", - "benchmark", - "tool", - "tool_group", - "prompt" - ], - "const": "tool_group", - "default": "tool_group", - "description": "Type of resource, always 'tool_group'" - }, - "mcp_endpoint": { - "$ref": "#/components/schemas/URL", - "description": "(Optional) Model Context Protocol endpoint for remote tools" - }, - "args": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Additional arguments for the tool group" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_id", - "type" - ], - "title": "ToolGroup", - "description": "A group of related tools managed together." 
- }, - "Trace": { - "type": "object", - "properties": { - "trace_id": { - "type": "string", - "description": "Unique identifier for the trace" - }, - "root_span_id": { - "type": "string", - "description": "Unique identifier for the root span that started this trace" - }, - "start_time": { - "type": "string", - "format": "date-time", - "description": "Timestamp when the trace began" - }, - "end_time": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the trace finished, if completed" - } - }, - "additionalProperties": false, - "required": [ - "trace_id", - "root_span_id", - "start_time" - ], - "title": "Trace", - "description": "A trace representing the complete execution path of a request across multiple operations." - }, - "Checkpoint": { - "type": "object", - "properties": { - "identifier": { - "type": "string", - "description": "Unique identifier for the checkpoint" - }, - "created_at": { - "type": "string", - "format": "date-time", - "description": "Timestamp when the checkpoint was created" - }, - "epoch": { - "type": "integer", - "description": "Training epoch when the checkpoint was saved" - }, - "post_training_job_id": { - "type": "string", - "description": "Identifier of the training job that created this checkpoint" - }, - "path": { - "type": "string", - "description": "File system path where the checkpoint is stored" - }, - "training_metrics": { - "$ref": "#/components/schemas/PostTrainingMetric", - "description": "(Optional) Training metrics associated with this checkpoint" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "created_at", - "epoch", - "post_training_job_id", - "path" - ], - "title": "Checkpoint", - "description": "Checkpoint created during training runs." - }, - "PostTrainingJobArtifactsResponse": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string", - "description": "Unique identifier for the training job" - }, - "checkpoints": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Checkpoint" - }, - "description": "List of model checkpoints created during training" - } - }, - "additionalProperties": false, - "required": [ - "job_uuid", - "checkpoints" - ], - "title": "PostTrainingJobArtifactsResponse", - "description": "Artifacts of a finetuning job." - }, - "PostTrainingMetric": { - "type": "object", - "properties": { - "epoch": { - "type": "integer", - "description": "Training epoch number" - }, - "train_loss": { - "type": "number", - "description": "Loss value on the training dataset" - }, - "validation_loss": { - "type": "number", - "description": "Loss value on the validation dataset" - }, - "perplexity": { - "type": "number", - "description": "Perplexity metric indicating model confidence" - } - }, - "additionalProperties": false, - "required": [ - "epoch", - "train_loss", - "validation_loss", - "perplexity" - ], - "title": "PostTrainingMetric", - "description": "Training metrics captured during post-training jobs." 
- }, - "PostTrainingJobStatusResponse": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string", - "description": "Unique identifier for the training job" - }, - "status": { - "type": "string", - "enum": [ - "completed", - "in_progress", - "failed", - "scheduled", - "cancelled" - ], - "description": "Current status of the training job" - }, - "scheduled_at": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the job was scheduled" - }, - "started_at": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the job execution began" - }, - "completed_at": { - "type": "string", - "format": "date-time", - "description": "(Optional) Timestamp when the job finished, if completed" - }, - "resources_allocated": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Information about computational resources allocated to the job" - }, - "checkpoints": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Checkpoint" - }, - "description": "List of model checkpoints created during training" - } - }, - "additionalProperties": false, - "required": [ - "job_uuid", - "status", - "checkpoints" - ], - "title": "PostTrainingJobStatusResponse", - "description": "Status of a finetuning job." - }, - "ListPostTrainingJobsResponse": { + "ListShieldsResponse": { "type": "object", "properties": { "data": { "type": "array", "items": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "job_uuid" - ], - "title": "PostTrainingJob" + "$ref": "#/components/schemas/Shield" } } }, @@ -13425,325 +9568,24 @@ "required": [ "data" ], - "title": "ListPostTrainingJobsResponse" + "title": "ListShieldsResponse" }, - "VectorDB": { + "RegisterShieldRequest": { "type": "object", "properties": { - "identifier": { - "type": "string" - }, - "provider_resource_id": { - "type": "string" - }, - "provider_id": { - "type": "string" - }, - "type": { + "shield_id": { "type": "string", - "enum": [ - "model", - "shield", - "vector_db", - "dataset", - "scoring_function", - "benchmark", - "tool", - "tool_group", - "prompt" - ], - "const": "vector_db", - "default": "vector_db", - "description": "Type of resource, always 'vector_db' for vector databases" + "description": "The identifier of the shield to register." }, - "embedding_model": { + "provider_shield_id": { "type": "string", - "description": "Name of the embedding model to use for vector generation" - }, - "embedding_dimension": { - "type": "integer", - "description": "Dimension of the embedding vectors" - }, - "vector_db_name": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "identifier", - "provider_id", - "type", - "embedding_model", - "embedding_dimension" - ], - "title": "VectorDB", - "description": "Vector database resource for storing and querying vector embeddings." - }, - "HealthInfo": { - "type": "object", - "properties": { - "status": { - "type": "string", - "enum": [ - "OK", - "Error", - "Not Implemented" - ], - "description": "Current health status of the service" - } - }, - "additionalProperties": false, - "required": [ - "status" - ], - "title": "HealthInfo", - "description": "Health status information for the service." 
- }, - "RAGDocument": { - "type": "object", - "properties": { - "document_id": { - "type": "string", - "description": "The unique identifier for the document." - }, - "content": { - "oneOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/InterleavedContentItem" - }, - { - "type": "array", - "items": { - "$ref": "#/components/schemas/InterleavedContentItem" - } - }, - { - "$ref": "#/components/schemas/URL" - } - ], - "description": "The content of the document." - }, - "mime_type": { - "type": "string", - "description": "The MIME type of the document." - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Additional metadata for the document." - } - }, - "additionalProperties": false, - "required": [ - "document_id", - "content", - "metadata" - ], - "title": "RAGDocument", - "description": "A document to be used for document ingestion in the RAG Tool." - }, - "InsertRequest": { - "type": "object", - "properties": { - "documents": { - "type": "array", - "items": { - "$ref": "#/components/schemas/RAGDocument" - }, - "description": "List of documents to index in the RAG system" - }, - "vector_db_id": { - "type": "string", - "description": "ID of the vector database to store the document embeddings" - }, - "chunk_size_in_tokens": { - "type": "integer", - "description": "(Optional) Size in tokens for document chunking during indexing" - } - }, - "additionalProperties": false, - "required": [ - "documents", - "vector_db_id", - "chunk_size_in_tokens" - ], - "title": "InsertRequest" - }, - "Chunk": { - "type": "object", - "properties": { - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "The content of the chunk, which can be interleaved text, images, or other types." - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Metadata associated with the chunk that will be used in the model context during inference." - }, - "embedding": { - "type": "array", - "items": { - "type": "number" - }, - "description": "Optional embedding for the chunk. If not provided, it will be computed later." - }, - "stored_chunk_id": { - "type": "string", - "description": "The chunk ID that is stored in the vector database. Used for backend functionality." - }, - "chunk_metadata": { - "$ref": "#/components/schemas/ChunkMetadata", - "description": "Metadata for the chunk that will NOT be used in the context during inference. The `chunk_metadata` is required backend functionality." - } - }, - "additionalProperties": false, - "required": [ - "content", - "metadata" - ], - "title": "Chunk", - "description": "A chunk of content that can be inserted into a vector database." - }, - "ChunkMetadata": { - "type": "object", - "properties": { - "chunk_id": { - "type": "string", - "description": "The ID of the chunk. If not set, it will be generated based on the document ID and content." - }, - "document_id": { - "type": "string", - "description": "The ID of the document this chunk belongs to." - }, - "source": { - "type": "string", - "description": "The source of the content, such as a URL, file path, or other identifier." 
- }, - "created_timestamp": { - "type": "integer", - "description": "An optional timestamp indicating when the chunk was created." - }, - "updated_timestamp": { - "type": "integer", - "description": "An optional timestamp indicating when the chunk was last updated." - }, - "chunk_window": { - "type": "string", - "description": "The window of the chunk, which can be used to group related chunks together." - }, - "chunk_tokenizer": { - "type": "string", - "description": "The tokenizer used to create the chunk. Default is Tiktoken." - }, - "chunk_embedding_model": { - "type": "string", - "description": "The embedding model used to create the chunk's embedding." - }, - "chunk_embedding_dimension": { - "type": "integer", - "description": "The dimension of the embedding vector for the chunk." - }, - "content_token_count": { - "type": "integer", - "description": "The number of tokens in the content of the chunk." - }, - "metadata_token_count": { - "type": "integer", - "description": "The number of tokens in the metadata of the chunk." - } - }, - "additionalProperties": false, - "title": "ChunkMetadata", - "description": "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata` is set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not expected to change after. Use `Chunk.metadata` for metadata that will be used in the context during inference." - }, - "InsertChunksRequest": { - "type": "object", - "properties": { - "vector_db_id": { - "type": "string", - "description": "The identifier of the vector database to insert the chunks into." - }, - "chunks": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Chunk" - }, - "description": "The chunks to insert. Each `Chunk` should contain content which can be interleaved text, images, or other types. `metadata`: `dict[str, Any]` and `embedding`: `List[float]` are optional. If `metadata` is provided, you configure how Llama Stack formats the chunk during generation. If `embedding` is not provided, it will be computed later." - }, - "ttl_seconds": { - "type": "integer", - "description": "The time to live of the chunks." - } - }, - "additionalProperties": false, - "required": [ - "vector_db_id", - "chunks" - ], - "title": "InsertChunksRequest" - }, - "ProviderInfo": { - "type": "object", - "properties": { - "api": { - "type": "string", - "description": "The API name this provider implements" + "description": "The identifier of the shield in the provider." }, "provider_id": { "type": "string", - "description": "Unique identifier for the provider" + "description": "The identifier of the provider." }, - "provider_type": { - "type": "string", - "description": "The type of provider implementation" - }, - "config": { + "params": { "type": "object", "additionalProperties": { "oneOf": [ @@ -13767,137 +9609,53 @@ } ] }, - "description": "Configuration parameters for the provider" - }, - "health": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Current health status of the provider" + "description": "The parameters of the shield." 
} }, "additionalProperties": false, "required": [ - "api", - "provider_id", - "provider_type", - "config", - "health" + "shield_id" ], - "title": "ProviderInfo", - "description": "Information about a registered provider including its configuration and health status." + "title": "RegisterShieldRequest" }, - "InvokeToolRequest": { + "SyntheticDataGenerateRequest": { "type": "object", "properties": { - "tool_name": { - "type": "string", - "description": "The name of the tool to invoke." - }, - "kwargs": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] + "dialogs": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Message" }, - "description": "A dictionary of arguments to pass to the tool." + "description": "List of conversation messages to use as input for synthetic data generation" + }, + "filtering_function": { + "type": "string", + "enum": [ + "none", + "random", + "top_k", + "top_p", + "top_k_top_p", + "sigmoid" + ], + "description": "Type of filtering to apply to generated synthetic data samples" + }, + "model": { + "type": "string", + "description": "(Optional) The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint" } }, "additionalProperties": false, "required": [ - "tool_name", - "kwargs" + "dialogs", + "filtering_function" ], - "title": "InvokeToolRequest" + "title": "SyntheticDataGenerateRequest" }, - "ToolInvocationResult": { + "SyntheticDataGenerationResponse": { "type": "object", "properties": { - "content": { - "$ref": "#/components/schemas/InterleavedContent", - "description": "(Optional) The output content from the tool execution" - }, - "error_message": { - "type": "string", - "description": "(Optional) Error message if the tool execution failed" - }, - "error_code": { - "type": "integer", - "description": "(Optional) Numeric error code if the tool execution failed" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Additional metadata about the tool execution" - } - }, - "additionalProperties": false, - "title": "ToolInvocationResult", - "description": "Result of a tool invocation." - }, - "PaginatedResponse": { - "type": "object", - "properties": { - "data": { + "synthetic_data": { "type": "array", "items": { "type": "object", @@ -13924,527 +9682,41 @@ ] } }, - "description": "The list of items for the current page" + "description": "List of generated synthetic data samples that passed the filtering criteria" }, - "has_more": { - "type": "boolean", - "description": "Whether there are more items available after this set" - }, - "url": { - "type": "string", - "description": "The URL for accessing this list" - } - }, - "additionalProperties": false, - "required": [ - "data", - "has_more" - ], - "title": "PaginatedResponse", - "description": "A generic paginated response that follows a simple format." 
- }, - "Job": { - "type": "object", - "properties": { - "job_id": { - "type": "string", - "description": "Unique identifier for the job" - }, - "status": { - "type": "string", - "enum": [ - "completed", - "in_progress", - "failed", - "scheduled", - "cancelled" - ], - "description": "Current execution status of the job" - } - }, - "additionalProperties": false, - "required": [ - "job_id", - "status" - ], - "title": "Job", - "description": "A job execution instance with status tracking." - }, - "ListBenchmarksResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Benchmark" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListBenchmarksResponse" - }, - "Order": { - "type": "string", - "enum": [ - "asc", - "desc" - ], - "title": "Order", - "description": "Sort order for paginated responses." - }, - "ListOpenAIChatCompletionResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The ID of the chat completion" + "statistics": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" }, - "choices": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChoice" - }, - "description": "List of choices" + { + "type": "boolean" }, - "object": { - "type": "string", - "const": "chat.completion", - "default": "chat.completion", - "description": "The object type, which will be \"chat.completion\"" + { + "type": "number" }, - "created": { - "type": "integer", - "description": "The Unix timestamp in seconds when the chat completion was created" + { + "type": "string" }, - "model": { - "type": "string", - "description": "The model that was used to generate the chat completion" + { + "type": "array" }, - "input_messages": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIMessageParam" - } + { + "type": "object" } - }, - "additionalProperties": false, - "required": [ - "id", - "choices", - "object", - "created", - "model", - "input_messages" - ], - "title": "OpenAICompletionWithInputMessages" + ] }, - "description": "List of chat completion objects with their input messages" - }, - "has_more": { - "type": "boolean", - "description": "Whether there are more completions available beyond this list" - }, - "first_id": { - "type": "string", - "description": "ID of the first completion in this list" - }, - "last_id": { - "type": "string", - "description": "ID of the last completion in this list" - }, - "object": { - "type": "string", - "const": "list", - "default": "list", - "description": "Must be \"list\" to identify this as a list response" + "description": "(Optional) Statistical information about the generation process and filtering results" } }, "additionalProperties": false, "required": [ - "data", - "has_more", - "first_id", - "last_id", - "object" + "synthetic_data" ], - "title": "ListOpenAIChatCompletionResponse", - "description": "Response from listing OpenAI-compatible chat completions." - }, - "ListDatasetsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Dataset" - }, - "description": "List of datasets" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListDatasetsResponse", - "description": "Response from listing datasets." 
- }, - "ListModelsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Model" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListModelsResponse" - }, - "ListOpenAIResponseInputItem": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIResponseInput" - }, - "description": "List of input items" - }, - "object": { - "type": "string", - "const": "list", - "default": "list", - "description": "Object type identifier, always \"list\"" - } - }, - "additionalProperties": false, - "required": [ - "data", - "object" - ], - "title": "ListOpenAIResponseInputItem", - "description": "List container for OpenAI response input items." - }, - "ListOpenAIResponseObject": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIResponseObjectWithInput" - }, - "description": "List of response objects with their input context" - }, - "has_more": { - "type": "boolean", - "description": "Whether there are more results available beyond this page" - }, - "first_id": { - "type": "string", - "description": "Identifier of the first item in this page" - }, - "last_id": { - "type": "string", - "description": "Identifier of the last item in this page" - }, - "object": { - "type": "string", - "const": "list", - "default": "list", - "description": "Object type identifier, always \"list\"" - } - }, - "additionalProperties": false, - "required": [ - "data", - "has_more", - "first_id", - "last_id", - "object" - ], - "title": "ListOpenAIResponseObject", - "description": "Paginated list of OpenAI response objects with navigation metadata." - }, - "OpenAIResponseObjectWithInput": { - "type": "object", - "properties": { - "created_at": { - "type": "integer", - "description": "Unix timestamp when the response was created" - }, - "error": { - "$ref": "#/components/schemas/OpenAIResponseError", - "description": "(Optional) Error details if the response generation failed" - }, - "id": { - "type": "string", - "description": "Unique identifier for this response" - }, - "model": { - "type": "string", - "description": "Model identifier used for generation" - }, - "object": { - "type": "string", - "const": "response", - "default": "response", - "description": "Object type identifier, always \"response\"" - }, - "output": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIResponseOutput" - }, - "description": "List of generated output items (messages, tool calls, etc.)" - }, - "parallel_tool_calls": { - "type": "boolean", - "default": false, - "description": "Whether tool calls can be executed in parallel" - }, - "previous_response_id": { - "type": "string", - "description": "(Optional) ID of the previous response in a conversation" - }, - "status": { - "type": "string", - "description": "Current status of the response generation" - }, - "temperature": { - "type": "number", - "description": "(Optional) Sampling temperature used for generation" - }, - "text": { - "$ref": "#/components/schemas/OpenAIResponseText", - "description": "Text formatting configuration for the response" - }, - "top_p": { - "type": "number", - "description": "(Optional) Nucleus sampling parameter used for generation" - }, - "truncation": { - "type": "string", - "description": "(Optional) Truncation strategy applied to the response" - }, - "input": { - "type": "array", - "items": { - "$ref": 
"#/components/schemas/OpenAIResponseInput" - }, - "description": "List of input items that led to this response" - } - }, - "additionalProperties": false, - "required": [ - "created_at", - "id", - "model", - "object", - "output", - "parallel_tool_calls", - "status", - "text", - "input" - ], - "title": "OpenAIResponseObjectWithInput", - "description": "OpenAI response object extended with input context information." - }, - "ListPromptsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Prompt" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListPromptsResponse", - "description": "Response model to list prompts." - }, - "ListProvidersResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ProviderInfo" - }, - "description": "List of provider information objects" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListProvidersResponse", - "description": "Response containing a list of all available providers." - }, - "RouteInfo": { - "type": "object", - "properties": { - "route": { - "type": "string", - "description": "The API endpoint path" - }, - "method": { - "type": "string", - "description": "HTTP method for the route" - }, - "provider_types": { - "type": "array", - "items": { - "type": "string" - }, - "description": "List of provider types that implement this route" - } - }, - "additionalProperties": false, - "required": [ - "route", - "method", - "provider_types" - ], - "title": "RouteInfo", - "description": "Information about an API route including its path, method, and implementing providers." - }, - "ListRoutesResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/RouteInfo" - }, - "description": "List of available route information objects" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListRoutesResponse", - "description": "Response containing a list of all available API routes." - }, - "ListToolDefsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolDef" - }, - "description": "List of tool definitions" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListToolDefsResponse", - "description": "Response containing a list of tool definitions." - }, - "ListScoringFunctionsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ScoringFn" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListScoringFunctionsResponse" - }, - "ListShieldsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Shield" - } - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListShieldsResponse" - }, - "ListToolGroupsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ToolGroup" - }, - "description": "List of tool groups" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListToolGroupsResponse", - "description": "Response containing a list of tool groups." 
- }, - "ListToolsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Tool" - }, - "description": "List of tools" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListToolsResponse", - "description": "Response containing a list of tools." - }, - "ListVectorDBsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorDB" - }, - "description": "List of vector databases" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "ListVectorDBsResponse", - "description": "Response from listing vector databases." + "title": "SyntheticDataGenerationResponse", + "description": "Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold." }, "Event": { "oneOf": [ @@ -14616,6 +9888,15 @@ "title": "SpanStartPayload", "description": "Payload for a span start event." }, + "SpanStatus": { + "type": "string", + "enum": [ + "ok", + "error" + ], + "title": "SpanStatus", + "description": "The status of a span indicating whether it completed successfully or with an error." + }, "StructuredLogEvent": { "type": "object", "properties": { @@ -14785,92 +10066,14 @@ ], "title": "LogEventRequest" }, - "VectorStoreChunkingStrategy": { - "oneOf": [ - { - "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" - }, - { - "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", - "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" - } - } - }, - "VectorStoreChunkingStrategyAuto": { + "InvokeToolRequest": { "type": "object", "properties": { - "type": { + "tool_name": { "type": "string", - "const": "auto", - "default": "auto", - "description": "Strategy type, always \"auto\" for automatic chunking" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "VectorStoreChunkingStrategyAuto", - "description": "Automatic chunking strategy for vector store files." - }, - "VectorStoreChunkingStrategyStatic": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "static", - "default": "static", - "description": "Strategy type, always \"static\" for static chunking" + "description": "The name of the tool to invoke." }, - "static": { - "$ref": "#/components/schemas/VectorStoreChunkingStrategyStaticConfig", - "description": "Configuration parameters for the static chunking strategy" - } - }, - "additionalProperties": false, - "required": [ - "type", - "static" - ], - "title": "VectorStoreChunkingStrategyStatic", - "description": "Static chunking strategy with configurable parameters." - }, - "VectorStoreChunkingStrategyStaticConfig": { - "type": "object", - "properties": { - "chunk_overlap_tokens": { - "type": "integer", - "default": 400, - "description": "Number of tokens to overlap between adjacent chunks" - }, - "max_chunk_size_tokens": { - "type": "integer", - "default": 800, - "description": "Maximum number of tokens per chunk, must be between 100 and 4096" - } - }, - "additionalProperties": false, - "required": [ - "chunk_overlap_tokens", - "max_chunk_size_tokens" - ], - "title": "VectorStoreChunkingStrategyStaticConfig", - "description": "Configuration for static chunking strategy." 
- }, - "OpenaiAttachFileToVectorStoreRequest": { - "type": "object", - "properties": { - "file_id": { - "type": "string", - "description": "The ID of the file to attach to the vector store." - }, - "attributes": { + "kwargs": { "type": "object", "additionalProperties": { "oneOf": [ @@ -14894,61 +10097,32 @@ } ] }, - "description": "The key-value attributes stored with the file, which can be used for filtering." - }, - "chunking_strategy": { - "$ref": "#/components/schemas/VectorStoreChunkingStrategy", - "description": "The chunking strategy to use for the file." + "description": "A dictionary of arguments to pass to the tool." } }, "additionalProperties": false, "required": [ - "file_id" + "tool_name", + "kwargs" ], - "title": "OpenaiAttachFileToVectorStoreRequest" + "title": "InvokeToolRequest" }, - "VectorStoreFileLastError": { + "ToolInvocationResult": { "type": "object", "properties": { - "code": { - "oneOf": [ - { - "type": "string", - "const": "server_error" - }, - { - "type": "string", - "const": "rate_limit_exceeded" - } - ], - "description": "Error code indicating the type of failure" + "content": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "(Optional) The output content from the tool execution" }, - "message": { + "error_message": { "type": "string", - "description": "Human-readable error message describing the failure" - } - }, - "additionalProperties": false, - "required": [ - "code", - "message" - ], - "title": "VectorStoreFileLastError", - "description": "Error information for failed vector store file processing." - }, - "VectorStoreFileObject": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for the file" + "description": "(Optional) Error message if the tool execution failed" }, - "object": { - "type": "string", - "default": "vector_store.file", - "description": "Object type identifier, always \"vector_store.file\"" + "error_code": { + "type": "integer", + "description": "(Optional) Numeric error code if the tool execution failed" }, - "attributes": { + "metadata": { "type": "object", "additionalProperties": { "oneOf": [ @@ -14972,174 +10146,32 @@ } ] }, - "description": "Key-value attributes associated with the file" - }, - "chunking_strategy": { - "oneOf": [ - { - "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" - }, - { - "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", - "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" - } - }, - "description": "Strategy used for splitting the file into chunks" - }, - "created_at": { - "type": "integer", - "description": "Timestamp when the file was added to the vector store" - }, - "last_error": { - "$ref": "#/components/schemas/VectorStoreFileLastError", - "description": "(Optional) Error information if file processing failed" - }, - "status": { - "$ref": "#/components/schemas/VectorStoreFileStatus", - "description": "Current processing status of the file" - }, - "usage_bytes": { - "type": "integer", - "default": 0, - "description": "Storage space used by this file in bytes" - }, - "vector_store_id": { - "type": "string", - "description": "ID of the vector store containing this file" + "description": "(Optional) Additional metadata about the tool execution" } }, "additionalProperties": false, - "required": [ - "id", - "object", - "attributes", - "chunking_strategy", - 
"created_at", - "status", - "usage_bytes", - "vector_store_id" - ], - "title": "VectorStoreFileObject", - "description": "OpenAI Vector Store File object." + "title": "ToolInvocationResult", + "description": "Result of a tool invocation." }, - "VectorStoreFileStatus": { - "oneOf": [ - { - "type": "string", - "const": "completed" - }, - { - "type": "string", - "const": "in_progress" - }, - { - "type": "string", - "const": "cancelled" - }, - { - "type": "string", - "const": "failed" - } - ] - }, - "VectorStoreFileBatchObject": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for the file batch" - }, - "object": { - "type": "string", - "default": "vector_store.file_batch", - "description": "Object type identifier, always \"vector_store.file_batch\"" - }, - "created_at": { - "type": "integer", - "description": "Timestamp when the file batch was created" - }, - "vector_store_id": { - "type": "string", - "description": "ID of the vector store containing the file batch" - }, - "status": { - "$ref": "#/components/schemas/VectorStoreFileStatus", - "description": "Current processing status of the file batch" - }, - "file_counts": { - "$ref": "#/components/schemas/VectorStoreFileCounts", - "description": "File processing status counts for the batch" - } - }, - "additionalProperties": false, - "required": [ - "id", - "object", - "created_at", - "vector_store_id", - "status", - "file_counts" - ], - "title": "VectorStoreFileBatchObject", - "description": "OpenAI Vector Store File Batch object." - }, - "VectorStoreFileCounts": { - "type": "object", - "properties": { - "completed": { - "type": "integer", - "description": "Number of files that have been successfully processed" - }, - "cancelled": { - "type": "integer", - "description": "Number of files that had their processing cancelled" - }, - "failed": { - "type": "integer", - "description": "Number of files that failed to process" - }, - "in_progress": { - "type": "integer", - "description": "Number of files currently being processed" - }, - "total": { - "type": "integer", - "description": "Total number of files in the vector store" - } - }, - "additionalProperties": false, - "required": [ - "completed", - "cancelled", - "failed", - "in_progress", - "total" - ], - "title": "VectorStoreFileCounts", - "description": "File processing status counts for a vector store." - }, - "OpenAIJSONSchema": { + "ToolDef": { "type": "object", "properties": { "name": { "type": "string", - "description": "Name of the schema" + "description": "Name of the tool" }, "description": { "type": "string", - "description": "(Optional) Description of the schema" + "description": "(Optional) Human-readable description of what the tool does" }, - "strict": { - "type": "boolean", - "description": "(Optional) Whether to enforce strict adherence to the schema" + "parameters": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolParameter" + }, + "description": "(Optional) List of parameters this tool accepts" }, - "schema": { + "metadata": { "type": "object", "additionalProperties": { "oneOf": [ @@ -15163,755 +10195,126 @@ } ] }, - "description": "(Optional) The JSON schema definition" + "description": "(Optional) Additional metadata about the tool" } }, "additionalProperties": false, "required": [ "name" ], - "title": "OpenAIJSONSchema", - "description": "JSON schema specification for OpenAI-compatible structured response format." + "title": "ToolDef", + "description": "Tool definition used in runtime contexts." 
}, - "OpenAIResponseFormatJSONObject": { + "ToolParameter": { "type": "object", "properties": { - "type": { + "name": { "type": "string", - "const": "json_object", - "default": "json_object", - "description": "Must be \"json_object\" to indicate generic JSON object response format" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "OpenAIResponseFormatJSONObject", - "description": "JSON object response format for OpenAI-compatible chat completion requests." - }, - "OpenAIResponseFormatJSONSchema": { - "type": "object", - "properties": { - "type": { + "description": "Name of the parameter" + }, + "parameter_type": { "type": "string", - "const": "json_schema", - "default": "json_schema", - "description": "Must be \"json_schema\" to indicate structured JSON response format" + "description": "Type of the parameter (e.g., string, integer)" }, - "json_schema": { - "$ref": "#/components/schemas/OpenAIJSONSchema", - "description": "The JSON schema specification for the response" - } - }, - "additionalProperties": false, - "required": [ - "type", - "json_schema" - ], - "title": "OpenAIResponseFormatJSONSchema", - "description": "JSON schema response format for OpenAI-compatible chat completion requests." - }, - "OpenAIResponseFormatParam": { - "oneOf": [ - { - "$ref": "#/components/schemas/OpenAIResponseFormatText" - }, - { - "$ref": "#/components/schemas/OpenAIResponseFormatJSONSchema" - }, - { - "$ref": "#/components/schemas/OpenAIResponseFormatJSONObject" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "text": "#/components/schemas/OpenAIResponseFormatText", - "json_schema": "#/components/schemas/OpenAIResponseFormatJSONSchema", - "json_object": "#/components/schemas/OpenAIResponseFormatJSONObject" - } - } - }, - "OpenAIResponseFormatText": { - "type": "object", - "properties": { - "type": { + "description": { "type": "string", - "const": "text", - "default": "text", - "description": "Must be \"text\" to indicate plain text response format" - } - }, - "additionalProperties": false, - "required": [ - "type" - ], - "title": "OpenAIResponseFormatText", - "description": "Text response format for OpenAI-compatible chat completion requests." - }, - "OpenaiChatCompletionRequest": { - "type": "object", - "properties": { - "model": { - "type": "string", - "description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint." + "description": "Human-readable description of what the parameter does" }, - "messages": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIMessageParam" - }, - "description": "List of messages in the conversation." + "required": { + "type": "boolean", + "default": true, + "description": "Whether this parameter is required for tool invocation" }, - "frequency_penalty": { - "type": "number", - "description": "(Optional) The penalty for repeated tokens." - }, - "function_call": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - ], - "description": "(Optional) The function call to use." 
- }, - "functions": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "(Optional) List of functions to use." - }, - "logit_bias": { + "items": { "type": "object", - "additionalProperties": { - "type": "number" - }, - "description": "(Optional) The logit bias to use." + "description": "Type of the elements when parameter_type is array" }, - "logprobs": { - "type": "boolean", - "description": "(Optional) The log probabilities to use." + "title": { + "type": "string", + "description": "(Optional) Title of the parameter" }, - "max_completion_tokens": { - "type": "integer", - "description": "(Optional) The maximum number of tokens to generate." - }, - "max_tokens": { - "type": "integer", - "description": "(Optional) The maximum number of tokens to generate." - }, - "n": { - "type": "integer", - "description": "(Optional) The number of completions to generate." - }, - "parallel_tool_calls": { - "type": "boolean", - "description": "(Optional) Whether to parallelize tool calls." - }, - "presence_penalty": { - "type": "number", - "description": "(Optional) The penalty for repeated tokens." - }, - "response_format": { - "$ref": "#/components/schemas/OpenAIResponseFormatParam", - "description": "(Optional) The response format to use." - }, - "seed": { - "type": "integer", - "description": "(Optional) The seed to use." - }, - "stop": { + "default": { "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, { "type": "string" }, { - "type": "array", - "items": { - "type": "string" - } - } - ], - "description": "(Optional) The stop tokens to use." - }, - "stream": { - "type": "boolean", - "description": "(Optional) Whether to stream the response." - }, - "stream_options": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) The stream options to use." - }, - "temperature": { - "type": "number", - "description": "(Optional) The temperature to use." - }, - "tool_choice": { - "oneOf": [ - { - "type": "string" + "type": "array" }, { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } + "type": "object" } ], - "description": "(Optional) The tool choice to use." - }, - "tools": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "(Optional) The tools to use." - }, - "top_logprobs": { - "type": "integer", - "description": "(Optional) The top log probabilities to use." - }, - "top_p": { - "type": "number", - "description": "(Optional) The top p to use." - }, - "user": { - "type": "string", - "description": "(Optional) The user to use." 
+ "description": "(Optional) Default value for the parameter if not provided" } }, "additionalProperties": false, "required": [ - "model", - "messages" + "name", + "parameter_type", + "description", + "required" ], - "title": "OpenaiChatCompletionRequest" + "title": "ToolParameter", + "description": "Parameter definition for a tool." }, - "OpenAIChatCompletion": { + "ListToolDefsResponse": { "type": "object", "properties": { - "id": { - "type": "string", - "description": "The ID of the chat completion" - }, - "choices": { + "data": { "type": "array", "items": { - "$ref": "#/components/schemas/OpenAIChoice" + "$ref": "#/components/schemas/ToolDef" }, - "description": "List of choices" - }, - "object": { - "type": "string", - "const": "chat.completion", - "default": "chat.completion", - "description": "The object type, which will be \"chat.completion\"" - }, - "created": { - "type": "integer", - "description": "The Unix timestamp in seconds when the chat completion was created" - }, - "model": { - "type": "string", - "description": "The model that was used to generate the chat completion" + "description": "List of tool definitions" } }, "additionalProperties": false, "required": [ - "id", - "choices", - "object", - "created", - "model" + "data" ], - "title": "OpenAIChatCompletion", - "description": "Response from an OpenAI-compatible chat completion request." + "title": "ListToolDefsResponse", + "description": "Response containing a list of tool definitions." }, - "OpenAIChatCompletionChunk": { + "RAGDocument": { "type": "object", "properties": { - "id": { + "document_id": { "type": "string", - "description": "The ID of the chat completion" + "description": "The unique identifier for the document." }, - "choices": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChunkChoice" - }, - "description": "List of choices" - }, - "object": { - "type": "string", - "const": "chat.completion.chunk", - "default": "chat.completion.chunk", - "description": "The object type, which will be \"chat.completion.chunk\"" - }, - "created": { - "type": "integer", - "description": "The Unix timestamp in seconds when the chat completion was created" - }, - "model": { - "type": "string", - "description": "The model that was used to generate the chat completion" - } - }, - "additionalProperties": false, - "required": [ - "id", - "choices", - "object", - "created", - "model" - ], - "title": "OpenAIChatCompletionChunk", - "description": "Chunk from a streaming response to an OpenAI-compatible chat completion request." - }, - "OpenAIChoiceDelta": { - "type": "object", - "properties": { "content": { - "type": "string", - "description": "(Optional) The content of the delta" - }, - "refusal": { - "type": "string", - "description": "(Optional) The refusal of the delta" - }, - "role": { - "type": "string", - "description": "(Optional) The role of the delta" - }, - "tool_calls": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIChatCompletionToolCall" - }, - "description": "(Optional) The tool calls of the delta" - } - }, - "additionalProperties": false, - "title": "OpenAIChoiceDelta", - "description": "A delta from an OpenAI-compatible chat completion streaming response." 
- }, - "OpenAIChunkChoice": { - "type": "object", - "properties": { - "delta": { - "$ref": "#/components/schemas/OpenAIChoiceDelta", - "description": "The delta from the chunk" - }, - "finish_reason": { - "type": "string", - "description": "The reason the model stopped generating" - }, - "index": { - "type": "integer", - "description": "The index of the choice" - }, - "logprobs": { - "$ref": "#/components/schemas/OpenAIChoiceLogprobs", - "description": "(Optional) The log probabilities for the tokens in the message" - } - }, - "additionalProperties": false, - "required": [ - "delta", - "finish_reason", - "index" - ], - "title": "OpenAIChunkChoice", - "description": "A chunk choice from an OpenAI-compatible chat completion streaming response." - }, - "OpenaiCompletionRequest": { - "type": "object", - "properties": { - "model": { - "type": "string", - "description": "The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint." - }, - "prompt": { "oneOf": [ { "type": "string" }, { - "type": "array", - "items": { - "type": "string" - } + "$ref": "#/components/schemas/InterleavedContentItem" }, { "type": "array", "items": { - "type": "integer" + "$ref": "#/components/schemas/InterleavedContentItem" } }, { - "type": "array", - "items": { - "type": "array", - "items": { - "type": "integer" - } - } + "$ref": "#/components/schemas/URL" } ], - "description": "The prompt to generate a completion for." + "description": "The content of the document." }, - "best_of": { - "type": "integer", - "description": "(Optional) The number of completions to generate." - }, - "echo": { - "type": "boolean", - "description": "(Optional) Whether to echo the prompt." - }, - "frequency_penalty": { - "type": "number", - "description": "(Optional) The penalty for repeated tokens." - }, - "logit_bias": { - "type": "object", - "additionalProperties": { - "type": "number" - }, - "description": "(Optional) The logit bias to use." - }, - "logprobs": { - "type": "boolean", - "description": "(Optional) The log probabilities to use." - }, - "max_tokens": { - "type": "integer", - "description": "(Optional) The maximum number of tokens to generate." - }, - "n": { - "type": "integer", - "description": "(Optional) The number of completions to generate." - }, - "presence_penalty": { - "type": "number", - "description": "(Optional) The penalty for repeated tokens." - }, - "seed": { - "type": "integer", - "description": "(Optional) The seed to use." - }, - "stop": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "type": "string" - } - } - ], - "description": "(Optional) The stop tokens to use." - }, - "stream": { - "type": "boolean", - "description": "(Optional) Whether to stream the response." - }, - "stream_options": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) The stream options to use." - }, - "temperature": { - "type": "number", - "description": "(Optional) The temperature to use." - }, - "top_p": { - "type": "number", - "description": "(Optional) The top p to use." - }, - "user": { + "mime_type": { "type": "string", - "description": "(Optional) The user to use." 
- }, - "guided_choice": { - "type": "array", - "items": { - "type": "string" - } - }, - "prompt_logprobs": { - "type": "integer" - }, - "suffix": { - "type": "string", - "description": "(Optional) The suffix that should be appended to the completion." - } - }, - "additionalProperties": false, - "required": [ - "model", - "prompt" - ], - "title": "OpenaiCompletionRequest" - }, - "OpenAICompletion": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "choices": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAICompletionChoice" - } - }, - "created": { - "type": "integer" - }, - "model": { - "type": "string" - }, - "object": { - "type": "string", - "const": "text_completion", - "default": "text_completion" - } - }, - "additionalProperties": false, - "required": [ - "id", - "choices", - "created", - "model", - "object" - ], - "title": "OpenAICompletion", - "description": "Response from an OpenAI-compatible completion request." - }, - "OpenAICompletionChoice": { - "type": "object", - "properties": { - "finish_reason": { - "type": "string" - }, - "text": { - "type": "string" - }, - "index": { - "type": "integer" - }, - "logprobs": { - "$ref": "#/components/schemas/OpenAIChoiceLogprobs" - } - }, - "additionalProperties": false, - "required": [ - "finish_reason", - "text", - "index" - ], - "title": "OpenAICompletionChoice", - "description": "A choice from an OpenAI-compatible completion response." - }, - "OpenaiCreateVectorStoreRequest": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "A name for the vector store." - }, - "file_ids": { - "type": "array", - "items": { - "type": "string" - }, - "description": "A list of File IDs that the vector store should use. Useful for tools like `file_search` that can access files." - }, - "expires_after": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The expiration policy for a vector store." - }, - "chunking_strategy": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy." + "description": "The MIME type of the document." }, "metadata": { "type": "object", @@ -15937,1268 +10340,44 @@ } ] }, - "description": "Set of 16 key-value pairs that can be attached to an object." - }, - "embedding_model": { - "type": "string", - "description": "The embedding model to use for this vector store." - }, - "embedding_dimension": { - "type": "integer", - "description": "The dimension of the embedding vectors (default: 384)." - }, - "provider_id": { - "type": "string", - "description": "The ID of the provider to use for this vector store." 
- } - }, - "additionalProperties": false, - "title": "OpenaiCreateVectorStoreRequest" - }, - "VectorStoreObject": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier for the vector store" - }, - "object": { - "type": "string", - "default": "vector_store", - "description": "Object type identifier, always \"vector_store\"" - }, - "created_at": { - "type": "integer", - "description": "Timestamp when the vector store was created" - }, - "name": { - "type": "string", - "description": "(Optional) Name of the vector store" - }, - "usage_bytes": { - "type": "integer", - "default": 0, - "description": "Storage space used by the vector store in bytes" - }, - "file_counts": { - "$ref": "#/components/schemas/VectorStoreFileCounts", - "description": "File processing status counts for the vector store" - }, - "status": { - "type": "string", - "default": "completed", - "description": "Current status of the vector store" - }, - "expires_after": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Expiration policy for the vector store" - }, - "expires_at": { - "type": "integer", - "description": "(Optional) Timestamp when the vector store will expire" - }, - "last_active_at": { - "type": "integer", - "description": "(Optional) Timestamp of last activity on the vector store" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Set of key-value pairs that can be attached to the vector store" + "description": "Additional metadata for the document." } }, "additionalProperties": false, "required": [ - "id", - "object", - "created_at", - "usage_bytes", - "file_counts", - "status", + "document_id", + "content", "metadata" ], - "title": "VectorStoreObject", - "description": "OpenAI Vector Store object." + "title": "RAGDocument", + "description": "A document to be used for document ingestion in the RAG Tool." }, - "OpenaiCreateVectorStoreFileBatchRequest": { + "InsertRequest": { "type": "object", "properties": { - "file_ids": { + "documents": { "type": "array", "items": { - "type": "string" + "$ref": "#/components/schemas/RAGDocument" }, - "description": "A list of File IDs that the vector store should use." + "description": "List of documents to index in the RAG system" }, - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Key-value attributes to store with the files." - }, - "chunking_strategy": { - "$ref": "#/components/schemas/VectorStoreChunkingStrategy", - "description": "(Optional) The chunking strategy used to chunk the file(s). Defaults to auto." 
- } - }, - "additionalProperties": false, - "required": [ - "file_ids" - ], - "title": "OpenaiCreateVectorStoreFileBatchRequest" - }, - "OpenAIFileDeleteResponse": { - "type": "object", - "properties": { - "id": { + "vector_db_id": { "type": "string", - "description": "The file identifier that was deleted" + "description": "ID of the vector database to store the document embeddings" }, - "object": { - "type": "string", - "const": "file", - "default": "file", - "description": "The object type, which is always \"file\"" - }, - "deleted": { - "type": "boolean", - "description": "Whether the file was successfully deleted" - } - }, - "additionalProperties": false, - "required": [ - "id", - "object", - "deleted" - ], - "title": "OpenAIFileDeleteResponse", - "description": "Response for deleting a file in OpenAI Files API." - }, - "VectorStoreDeleteResponse": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier of the deleted vector store" - }, - "object": { - "type": "string", - "default": "vector_store.deleted", - "description": "Object type identifier for the deletion response" - }, - "deleted": { - "type": "boolean", - "default": true, - "description": "Whether the deletion operation was successful" - } - }, - "additionalProperties": false, - "required": [ - "id", - "object", - "deleted" - ], - "title": "VectorStoreDeleteResponse", - "description": "Response from deleting a vector store." - }, - "VectorStoreFileDeleteResponse": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "Unique identifier of the deleted file" - }, - "object": { - "type": "string", - "default": "vector_store.file.deleted", - "description": "Object type identifier for the deletion response" - }, - "deleted": { - "type": "boolean", - "default": true, - "description": "Whether the deletion operation was successful" - } - }, - "additionalProperties": false, - "required": [ - "id", - "object", - "deleted" - ], - "title": "VectorStoreFileDeleteResponse", - "description": "Response from deleting a vector store file." - }, - "OpenaiEmbeddingsRequest": { - "type": "object", - "properties": { - "model": { - "type": "string", - "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint." - }, - "input": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "type": "string" - } - } - ], - "description": "Input text to embed, encoded as a string or array of strings. To embed multiple inputs in a single request, pass an array of strings." - }, - "encoding_format": { - "type": "string", - "description": "(Optional) The format to return the embeddings in. Can be either \"float\" or \"base64\". Defaults to \"float\"." - }, - "dimensions": { + "chunk_size_in_tokens": { "type": "integer", - "description": "(Optional) The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models." - }, - "user": { - "type": "string", - "description": "(Optional) A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse." 
+ "description": "(Optional) Size in tokens for document chunking during indexing" } }, "additionalProperties": false, "required": [ - "model", - "input" + "documents", + "vector_db_id", + "chunk_size_in_tokens" ], - "title": "OpenaiEmbeddingsRequest" - }, - "OpenAIEmbeddingData": { - "type": "object", - "properties": { - "object": { - "type": "string", - "const": "embedding", - "default": "embedding", - "description": "The object type, which will be \"embedding\"" - }, - "embedding": { - "oneOf": [ - { - "type": "array", - "items": { - "type": "number" - } - }, - { - "type": "string" - } - ], - "description": "The embedding vector as a list of floats (when encoding_format=\"float\") or as a base64-encoded string (when encoding_format=\"base64\")" - }, - "index": { - "type": "integer", - "description": "The index of the embedding in the input list" - } - }, - "additionalProperties": false, - "required": [ - "object", - "embedding", - "index" - ], - "title": "OpenAIEmbeddingData", - "description": "A single embedding data object from an OpenAI-compatible embeddings response." - }, - "OpenAIEmbeddingUsage": { - "type": "object", - "properties": { - "prompt_tokens": { - "type": "integer", - "description": "The number of tokens in the input" - }, - "total_tokens": { - "type": "integer", - "description": "The total number of tokens used" - } - }, - "additionalProperties": false, - "required": [ - "prompt_tokens", - "total_tokens" - ], - "title": "OpenAIEmbeddingUsage", - "description": "Usage information for an OpenAI-compatible embeddings response." - }, - "OpenAIEmbeddingsResponse": { - "type": "object", - "properties": { - "object": { - "type": "string", - "const": "list", - "default": "list", - "description": "The object type, which will be \"list\"" - }, - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIEmbeddingData" - }, - "description": "List of embedding data objects" - }, - "model": { - "type": "string", - "description": "The model that was used to generate the embeddings" - }, - "usage": { - "$ref": "#/components/schemas/OpenAIEmbeddingUsage", - "description": "Usage information" - } - }, - "additionalProperties": false, - "required": [ - "object", - "data", - "model", - "usage" - ], - "title": "OpenAIEmbeddingsResponse", - "description": "Response from an OpenAI-compatible embeddings request." - }, - "OpenAIFilePurpose": { - "type": "string", - "enum": [ - "assistants", - "batch" - ], - "title": "OpenAIFilePurpose", - "description": "Valid purpose values for OpenAI Files API." - }, - "ListOpenAIFileResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/OpenAIFileObject" - }, - "description": "List of file objects" - }, - "has_more": { - "type": "boolean", - "description": "Whether there are more files available beyond this page" - }, - "first_id": { - "type": "string", - "description": "ID of the first file in the list for pagination" - }, - "last_id": { - "type": "string", - "description": "ID of the last file in the list for pagination" - }, - "object": { - "type": "string", - "const": "list", - "default": "list", - "description": "The object type, which is always \"list\"" - } - }, - "additionalProperties": false, - "required": [ - "data", - "has_more", - "first_id", - "last_id", - "object" - ], - "title": "ListOpenAIFileResponse", - "description": "Response for listing files in OpenAI Files API." 
- }, - "OpenAIFileObject": { - "type": "object", - "properties": { - "object": { - "type": "string", - "const": "file", - "default": "file", - "description": "The object type, which is always \"file\"" - }, - "id": { - "type": "string", - "description": "The file identifier, which can be referenced in the API endpoints" - }, - "bytes": { - "type": "integer", - "description": "The size of the file, in bytes" - }, - "created_at": { - "type": "integer", - "description": "The Unix timestamp (in seconds) for when the file was created" - }, - "expires_at": { - "type": "integer", - "description": "The Unix timestamp (in seconds) for when the file expires" - }, - "filename": { - "type": "string", - "description": "The name of the file" - }, - "purpose": { - "type": "string", - "enum": [ - "assistants", - "batch" - ], - "description": "The intended purpose of the file" - } - }, - "additionalProperties": false, - "required": [ - "object", - "id", - "bytes", - "created_at", - "expires_at", - "filename", - "purpose" - ], - "title": "OpenAIFileObject", - "description": "OpenAI File object as defined in the OpenAI Files API." - }, - "VectorStoreListFilesResponse": { - "type": "object", - "properties": { - "object": { - "type": "string", - "default": "list", - "description": "Object type identifier, always \"list\"" - }, - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorStoreFileObject" - }, - "description": "List of vector store file objects" - }, - "first_id": { - "type": "string", - "description": "(Optional) ID of the first file in the list for pagination" - }, - "last_id": { - "type": "string", - "description": "(Optional) ID of the last file in the list for pagination" - }, - "has_more": { - "type": "boolean", - "default": false, - "description": "Whether there are more files available beyond this page" - } - }, - "additionalProperties": false, - "required": [ - "object", - "data", - "has_more" - ], - "title": "VectorStoreListFilesResponse", - "description": "Response from listing files in a vector store." - }, - "VectorStoreFilesListInBatchResponse": { - "type": "object", - "properties": { - "object": { - "type": "string", - "default": "list", - "description": "Object type identifier, always \"list\"" - }, - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorStoreFileObject" - }, - "description": "List of vector store file objects in the batch" - }, - "first_id": { - "type": "string", - "description": "(Optional) ID of the first file in the list for pagination" - }, - "last_id": { - "type": "string", - "description": "(Optional) ID of the last file in the list for pagination" - }, - "has_more": { - "type": "boolean", - "default": false, - "description": "Whether there are more files available beyond this page" - } - }, - "additionalProperties": false, - "required": [ - "object", - "data", - "has_more" - ], - "title": "VectorStoreFilesListInBatchResponse", - "description": "Response from listing files in a vector store file batch." 
- }, - "VectorStoreListResponse": { - "type": "object", - "properties": { - "object": { - "type": "string", - "default": "list", - "description": "Object type identifier, always \"list\"" - }, - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorStoreObject" - }, - "description": "List of vector store objects" - }, - "first_id": { - "type": "string", - "description": "(Optional) ID of the first vector store in the list for pagination" - }, - "last_id": { - "type": "string", - "description": "(Optional) ID of the last vector store in the list for pagination" - }, - "has_more": { - "type": "boolean", - "default": false, - "description": "Whether there are more vector stores available beyond this page" - } - }, - "additionalProperties": false, - "required": [ - "object", - "data", - "has_more" - ], - "title": "VectorStoreListResponse", - "description": "Response from listing vector stores." - }, - "Response": { - "type": "object", - "title": "Response" - }, - "VectorStoreContent": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "text", - "description": "Content type, currently only \"text\" is supported" - }, - "text": { - "type": "string", - "description": "The actual text content" - } - }, - "additionalProperties": false, - "required": [ - "type", - "text" - ], - "title": "VectorStoreContent", - "description": "Content item from a vector store file or search result." - }, - "VectorStoreFileContentsResponse": { - "type": "object", - "properties": { - "file_id": { - "type": "string", - "description": "Unique identifier for the file" - }, - "filename": { - "type": "string", - "description": "Name of the file" - }, - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Key-value attributes associated with the file" - }, - "content": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorStoreContent" - }, - "description": "List of content items from the file" - } - }, - "additionalProperties": false, - "required": [ - "file_id", - "filename", - "attributes", - "content" - ], - "title": "VectorStoreFileContentsResponse", - "description": "Response from retrieving the contents of a vector store file." - }, - "OpenaiSearchVectorStoreRequest": { - "type": "object", - "properties": { - "query": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "array", - "items": { - "type": "string" - } - } - ], - "description": "The query string or array for performing the search." - }, - "filters": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Filters based on file attributes to narrow the search results." - }, - "max_num_results": { - "type": "integer", - "description": "Maximum number of results to return (1 to 50 inclusive, default 10)." 
- }, - "ranking_options": { - "type": "object", - "properties": { - "ranker": { - "type": "string", - "description": "(Optional) Name of the ranking algorithm to use" - }, - "score_threshold": { - "type": "number", - "default": 0.0, - "description": "(Optional) Minimum relevance score threshold for results" - } - }, - "additionalProperties": false, - "description": "Ranking options for fine-tuning the search results." - }, - "rewrite_query": { - "type": "boolean", - "description": "Whether to rewrite the natural language query for vector search (default false)" - }, - "search_mode": { - "type": "string", - "description": "The search mode to use - \"keyword\", \"vector\", or \"hybrid\" (default \"vector\")" - } - }, - "additionalProperties": false, - "required": [ - "query" - ], - "title": "OpenaiSearchVectorStoreRequest" - }, - "VectorStoreSearchResponse": { - "type": "object", - "properties": { - "file_id": { - "type": "string", - "description": "Unique identifier of the file containing the result" - }, - "filename": { - "type": "string", - "description": "Name of the file containing the result" - }, - "score": { - "type": "number", - "description": "Relevance score for this search result" - }, - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "boolean" - } - ] - }, - "description": "(Optional) Key-value attributes associated with the file" - }, - "content": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorStoreContent" - }, - "description": "List of content items matching the search query" - } - }, - "additionalProperties": false, - "required": [ - "file_id", - "filename", - "score", - "content" - ], - "title": "VectorStoreSearchResponse", - "description": "Response from searching a vector store." - }, - "VectorStoreSearchResponsePage": { - "type": "object", - "properties": { - "object": { - "type": "string", - "default": "vector_store.search_results.page", - "description": "Object type identifier for the search results page" - }, - "search_query": { - "type": "string", - "description": "The original search query that was executed" - }, - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VectorStoreSearchResponse" - }, - "description": "List of search result objects" - }, - "has_more": { - "type": "boolean", - "default": false, - "description": "Whether there are more results available beyond this page" - }, - "next_page": { - "type": "string", - "description": "(Optional) Token for retrieving the next page of results" - } - }, - "additionalProperties": false, - "required": [ - "object", - "search_query", - "data", - "has_more" - ], - "title": "VectorStoreSearchResponsePage", - "description": "Paginated response from searching a vector store." - }, - "OpenaiUpdateVectorStoreRequest": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the vector store." - }, - "expires_after": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The expiration policy for a vector store." 
- }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Set of 16 key-value pairs that can be attached to an object." - } - }, - "additionalProperties": false, - "title": "OpenaiUpdateVectorStoreRequest" - }, - "OpenaiUpdateVectorStoreFileRequest": { - "type": "object", - "properties": { - "attributes": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The updated key-value attributes to store with the file." - } - }, - "additionalProperties": false, - "required": [ - "attributes" - ], - "title": "OpenaiUpdateVectorStoreFileRequest" - }, - "ExpiresAfter": { - "type": "object", - "properties": { - "anchor": { - "type": "string", - "const": "created_at" - }, - "seconds": { - "type": "integer" - } - }, - "additionalProperties": false, - "required": [ - "anchor", - "seconds" - ], - "title": "ExpiresAfter", - "description": "Control expiration of uploaded files.\nParams:\n - anchor, must be \"created_at\"\n - seconds, must be int between 3600 and 2592000 (1 hour to 30 days)" - }, - "DPOAlignmentConfig": { - "type": "object", - "properties": { - "beta": { - "type": "number", - "description": "Temperature parameter for the DPO loss" - }, - "loss_type": { - "$ref": "#/components/schemas/DPOLossType", - "default": "sigmoid", - "description": "The type of loss function to use for DPO" - } - }, - "additionalProperties": false, - "required": [ - "beta", - "loss_type" - ], - "title": "DPOAlignmentConfig", - "description": "Configuration for Direct Preference Optimization (DPO) alignment." - }, - "DPOLossType": { - "type": "string", - "enum": [ - "sigmoid", - "hinge", - "ipo", - "kto_pair" - ], - "title": "DPOLossType" - }, - "DataConfig": { - "type": "object", - "properties": { - "dataset_id": { - "type": "string", - "description": "Unique identifier for the training dataset" - }, - "batch_size": { - "type": "integer", - "description": "Number of samples per training batch" - }, - "shuffle": { - "type": "boolean", - "description": "Whether to shuffle the dataset during training" - }, - "data_format": { - "$ref": "#/components/schemas/DatasetFormat", - "description": "Format of the dataset (instruct or dialog)" - }, - "validation_dataset_id": { - "type": "string", - "description": "(Optional) Unique identifier for the validation dataset" - }, - "packed": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to pack multiple samples into a single sequence for efficiency" - }, - "train_on_input": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to compute loss on input tokens as well as output tokens" - } - }, - "additionalProperties": false, - "required": [ - "dataset_id", - "batch_size", - "shuffle", - "data_format" - ], - "title": "DataConfig", - "description": "Configuration for training data and data loading." - }, - "DatasetFormat": { - "type": "string", - "enum": [ - "instruct", - "dialog" - ], - "title": "DatasetFormat", - "description": "Format of the training dataset." 
- }, - "EfficiencyConfig": { - "type": "object", - "properties": { - "enable_activation_checkpointing": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to use activation checkpointing to reduce memory usage" - }, - "enable_activation_offloading": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to offload activations to CPU to save GPU memory" - }, - "memory_efficient_fsdp_wrap": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to use memory-efficient FSDP wrapping" - }, - "fsdp_cpu_offload": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to offload FSDP parameters to CPU" - } - }, - "additionalProperties": false, - "title": "EfficiencyConfig", - "description": "Configuration for memory and compute efficiency optimizations." - }, - "OptimizerConfig": { - "type": "object", - "properties": { - "optimizer_type": { - "$ref": "#/components/schemas/OptimizerType", - "description": "Type of optimizer to use (adam, adamw, or sgd)" - }, - "lr": { - "type": "number", - "description": "Learning rate for the optimizer" - }, - "weight_decay": { - "type": "number", - "description": "Weight decay coefficient for regularization" - }, - "num_warmup_steps": { - "type": "integer", - "description": "Number of steps for learning rate warmup" - } - }, - "additionalProperties": false, - "required": [ - "optimizer_type", - "lr", - "weight_decay", - "num_warmup_steps" - ], - "title": "OptimizerConfig", - "description": "Configuration parameters for the optimization algorithm." - }, - "OptimizerType": { - "type": "string", - "enum": [ - "adam", - "adamw", - "sgd" - ], - "title": "OptimizerType", - "description": "Available optimizer algorithms for training." - }, - "TrainingConfig": { - "type": "object", - "properties": { - "n_epochs": { - "type": "integer", - "description": "Number of training epochs to run" - }, - "max_steps_per_epoch": { - "type": "integer", - "default": 1, - "description": "Maximum number of steps to run per epoch" - }, - "gradient_accumulation_steps": { - "type": "integer", - "default": 1, - "description": "Number of steps to accumulate gradients before updating" - }, - "max_validation_steps": { - "type": "integer", - "default": 1, - "description": "(Optional) Maximum number of validation steps per epoch" - }, - "data_config": { - "$ref": "#/components/schemas/DataConfig", - "description": "(Optional) Configuration for data loading and formatting" - }, - "optimizer_config": { - "$ref": "#/components/schemas/OptimizerConfig", - "description": "(Optional) Configuration for the optimization algorithm" - }, - "efficiency_config": { - "$ref": "#/components/schemas/EfficiencyConfig", - "description": "(Optional) Configuration for memory and compute optimizations" - }, - "dtype": { - "type": "string", - "default": "bf16", - "description": "(Optional) Data type for model parameters (bf16, fp16, fp32)" - } - }, - "additionalProperties": false, - "required": [ - "n_epochs", - "max_steps_per_epoch", - "gradient_accumulation_steps" - ], - "title": "TrainingConfig", - "description": "Comprehensive configuration for the training process." - }, - "PreferenceOptimizeRequest": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string", - "description": "The UUID of the job to create." - }, - "finetuned_model": { - "type": "string", - "description": "The model to fine-tune." 
- }, - "algorithm_config": { - "$ref": "#/components/schemas/DPOAlignmentConfig", - "description": "The algorithm configuration." - }, - "training_config": { - "$ref": "#/components/schemas/TrainingConfig", - "description": "The training configuration." - }, - "hyperparam_search_config": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The hyperparam search configuration." - }, - "logger_config": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The logger configuration." - } - }, - "additionalProperties": false, - "required": [ - "job_uuid", - "finetuned_model", - "algorithm_config", - "training_config", - "hyperparam_search_config", - "logger_config" - ], - "title": "PreferenceOptimizeRequest" - }, - "PostTrainingJob": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string" - } - }, - "additionalProperties": false, - "required": [ - "job_uuid" - ], - "title": "PostTrainingJob" + "title": "InsertRequest" }, "DefaultRAGQueryGeneratorConfig": { "type": "object", @@ -17446,6 +10625,483 @@ "title": "RAGQueryResult", "description": "Result of a RAG query containing retrieved content and metadata." }, + "ToolGroup": { + "type": "object", + "properties": { + "identifier": { + "type": "string" + }, + "provider_resource_id": { + "type": "string" + }, + "provider_id": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group", + "prompt" + ], + "const": "tool_group", + "default": "tool_group", + "description": "Type of resource, always 'tool_group'" + }, + "mcp_endpoint": { + "$ref": "#/components/schemas/URL", + "description": "(Optional) Model Context Protocol endpoint for remote tools" + }, + "args": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) Additional arguments for the tool group" + } + }, + "additionalProperties": false, + "required": [ + "identifier", + "provider_id", + "type" + ], + "title": "ToolGroup", + "description": "A group of related tools managed together." + }, + "ListToolGroupsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolGroup" + }, + "description": "List of tool groups" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListToolGroupsResponse", + "description": "Response containing a list of tool groups." + }, + "RegisterToolGroupRequest": { + "type": "object", + "properties": { + "toolgroup_id": { + "type": "string", + "description": "The ID of the tool group to register." + }, + "provider_id": { + "type": "string", + "description": "The ID of the provider to use for the tool group." + }, + "mcp_endpoint": { + "$ref": "#/components/schemas/URL", + "description": "The MCP endpoint to use for the tool group." 
+ }, + "args": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "A dictionary of arguments to pass to the tool group." + } + }, + "additionalProperties": false, + "required": [ + "toolgroup_id", + "provider_id" + ], + "title": "RegisterToolGroupRequest" + }, + "Tool": { + "type": "object", + "properties": { + "identifier": { + "type": "string" + }, + "provider_resource_id": { + "type": "string" + }, + "provider_id": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group", + "prompt" + ], + "const": "tool", + "default": "tool", + "description": "Type of resource, always 'tool'" + }, + "toolgroup_id": { + "type": "string", + "description": "ID of the tool group this tool belongs to" + }, + "description": { + "type": "string", + "description": "Human-readable description of what the tool does" + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/components/schemas/ToolParameter" + }, + "description": "List of parameters this tool accepts" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) Additional metadata about the tool" + } + }, + "additionalProperties": false, + "required": [ + "identifier", + "provider_id", + "type", + "toolgroup_id", + "description", + "parameters" + ], + "title": "Tool", + "description": "A tool that can be invoked by agents." + }, + "ListToolsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Tool" + }, + "description": "List of tools" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListToolsResponse", + "description": "Response containing a list of tools." + }, + "VectorDB": { + "type": "object", + "properties": { + "identifier": { + "type": "string" + }, + "provider_resource_id": { + "type": "string" + }, + "provider_id": { + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "model", + "shield", + "vector_db", + "dataset", + "scoring_function", + "benchmark", + "tool", + "tool_group", + "prompt" + ], + "const": "vector_db", + "default": "vector_db", + "description": "Type of resource, always 'vector_db' for vector databases" + }, + "embedding_model": { + "type": "string", + "description": "Name of the embedding model to use for vector generation" + }, + "embedding_dimension": { + "type": "integer", + "description": "Dimension of the embedding vectors" + }, + "vector_db_name": { + "type": "string" + } + }, + "additionalProperties": false, + "required": [ + "identifier", + "provider_id", + "type", + "embedding_model", + "embedding_dimension" + ], + "title": "VectorDB", + "description": "Vector database resource for storing and querying vector embeddings." 
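A minimal, hypothetical registration payload matching the `RegisterToolGroupRequest` schema above; the tool group and provider IDs are placeholders, and the `mcp_endpoint` value assumes the referenced `URL` schema wraps a single `uri` string.

# Hypothetical RegisterToolGroupRequest body; IDs and the endpoint URI are placeholders.
register_toolgroup_request = {
    "toolgroup_id": "mcp::filesystem",                     # required
    "provider_id": "model-context-protocol",               # required
    "mcp_endpoint": {"uri": "http://localhost:8000/mcp"},  # optional; assumes URL == {"uri": ...}
    "args": {"read_only": True},                           # optional extra arguments
}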
+ }, + "ListVectorDBsResponse": { + "type": "object", + "properties": { + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/VectorDB" + }, + "description": "List of vector databases" + } + }, + "additionalProperties": false, + "required": [ + "data" + ], + "title": "ListVectorDBsResponse", + "description": "Response from listing vector databases." + }, + "RegisterVectorDbRequest": { + "type": "object", + "properties": { + "vector_db_id": { + "type": "string", + "description": "The identifier of the vector database to register." + }, + "embedding_model": { + "type": "string", + "description": "The embedding model to use." + }, + "embedding_dimension": { + "type": "integer", + "description": "The dimension of the embedding model." + }, + "provider_id": { + "type": "string", + "description": "The identifier of the provider." + }, + "vector_db_name": { + "type": "string", + "description": "The name of the vector database." + }, + "provider_vector_db_id": { + "type": "string", + "description": "The identifier of the vector database in the provider." + } + }, + "additionalProperties": false, + "required": [ + "vector_db_id", + "embedding_model" + ], + "title": "RegisterVectorDbRequest" + }, + "Chunk": { + "type": "object", + "properties": { + "content": { + "$ref": "#/components/schemas/InterleavedContent", + "description": "The content of the chunk, which can be interleaved text, images, or other types." + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Metadata associated with the chunk that will be used in the model context during inference." + }, + "embedding": { + "type": "array", + "items": { + "type": "number" + }, + "description": "Optional embedding for the chunk. If not provided, it will be computed later." + }, + "stored_chunk_id": { + "type": "string", + "description": "The chunk ID that is stored in the vector database. Used for backend functionality." + }, + "chunk_metadata": { + "$ref": "#/components/schemas/ChunkMetadata", + "description": "Metadata for the chunk that will NOT be used in the context during inference. The `chunk_metadata` is required backend functionality." + } + }, + "additionalProperties": false, + "required": [ + "content", + "metadata" + ], + "title": "Chunk", + "description": "A chunk of content that can be inserted into a vector database." + }, + "ChunkMetadata": { + "type": "object", + "properties": { + "chunk_id": { + "type": "string", + "description": "The ID of the chunk. If not set, it will be generated based on the document ID and content." + }, + "document_id": { + "type": "string", + "description": "The ID of the document this chunk belongs to." + }, + "source": { + "type": "string", + "description": "The source of the content, such as a URL, file path, or other identifier." + }, + "created_timestamp": { + "type": "integer", + "description": "An optional timestamp indicating when the chunk was created." + }, + "updated_timestamp": { + "type": "integer", + "description": "An optional timestamp indicating when the chunk was last updated." + }, + "chunk_window": { + "type": "string", + "description": "The window of the chunk, which can be used to group related chunks together." + }, + "chunk_tokenizer": { + "type": "string", + "description": "The tokenizer used to create the chunk. Default is Tiktoken." 
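A minimal, hypothetical payload for the `RegisterVectorDbRequest` schema above; the database ID, embedding model name, and provider ID are placeholders.

# Hypothetical RegisterVectorDbRequest body; identifiers and model name are placeholders.
register_vector_db_request = {
    "vector_db_id": "docs-index",             # required
    "embedding_model": "all-MiniLM-L6-v2",    # required
    "embedding_dimension": 384,               # optional
    "provider_id": "faiss",                   # optional
    "vector_db_name": "Documentation index",  # optional display name
}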
+ }, + "chunk_embedding_model": { + "type": "string", + "description": "The embedding model used to create the chunk's embedding." + }, + "chunk_embedding_dimension": { + "type": "integer", + "description": "The dimension of the embedding vector for the chunk." + }, + "content_token_count": { + "type": "integer", + "description": "The number of tokens in the content of the chunk." + }, + "metadata_token_count": { + "type": "integer", + "description": "The number of tokens in the metadata of the chunk." + } + }, + "additionalProperties": false, + "title": "ChunkMetadata", + "description": "`ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional information about the chunk that will not be used in the context during inference, but is required for backend functionality. The `ChunkMetadata` is set during chunk creation in `MemoryToolRuntimeImpl().insert()` and is not expected to change after. Use `Chunk.metadata` for metadata that will be used in the context during inference." + }, + "InsertChunksRequest": { + "type": "object", + "properties": { + "vector_db_id": { + "type": "string", + "description": "The identifier of the vector database to insert the chunks into." + }, + "chunks": { + "type": "array", + "items": { + "$ref": "#/components/schemas/Chunk" + }, + "description": "The chunks to insert. Each `Chunk` should contain content which can be interleaved text, images, or other types. `metadata`: `dict[str, Any]` and `embedding`: `List[float]` are optional. If `metadata` is provided, you configure how Llama Stack formats the chunk during generation. If `embedding` is not provided, it will be computed later." + }, + "ttl_seconds": { + "type": "integer", + "description": "The time to live of the chunks." + } + }, + "additionalProperties": false, + "required": [ + "vector_db_id", + "chunks" + ], + "title": "InsertChunksRequest" + }, "QueryChunksRequest": { "type": "object", "properties": { @@ -17517,340 +11173,252 @@ "title": "QueryChunksResponse", "description": "Response from querying chunks in a vector database." }, - "QueryMetricsRequest": { + "VectorStoreFileCounts": { "type": "object", "properties": { - "start_time": { + "completed": { "type": "integer", - "description": "The start time of the metric to query." + "description": "Number of files that have been successfully processed" }, - "end_time": { + "cancelled": { "type": "integer", - "description": "The end time of the metric to query." + "description": "Number of files that had their processing cancelled" }, - "granularity": { + "failed": { + "type": "integer", + "description": "Number of files that failed to process" + }, + "in_progress": { + "type": "integer", + "description": "Number of files currently being processed" + }, + "total": { + "type": "integer", + "description": "Total number of files in the vector store" + } + }, + "additionalProperties": false, + "required": [ + "completed", + "cancelled", + "failed", + "in_progress", + "total" + ], + "title": "VectorStoreFileCounts", + "description": "File processing status counts for a vector store." + }, + "VectorStoreListResponse": { + "type": "object", + "properties": { + "object": { "type": "string", - "description": "The granularity of the metric to query." + "default": "list", + "description": "Object type identifier, always \"list\"" }, - "query_type": { - "type": "string", - "enum": [ - "range", - "instant" - ], - "description": "The type of query to perform."
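A minimal, hypothetical payload for the `Chunk`/`InsertChunksRequest` schemas above; IDs and text are placeholders, and the embedding is omitted since the schema states it is computed later when absent.

# Hypothetical InsertChunksRequest body; identifiers and content are placeholders.
insert_chunks_request = {
    "vector_db_id": "docs-index",  # required
    "chunks": [
        {
            "content": "A chunked passage of text.",  # required
            "metadata": {"document_id": "doc-001"},   # required; included in model context
            # "embedding" omitted: computed later when not provided
        }
    ],
    "ttl_seconds": 3600,           # optional time-to-live for the chunks
}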
- }, - "label_matchers": { + "data": { "type": "array", "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "The name of the label to match" - }, - "value": { - "type": "string", - "description": "The value to match against" - }, - "operator": { - "type": "string", - "enum": [ - "=", - "!=", - "=~", - "!~" - ], - "description": "The comparison operator to use for matching", - "default": "=" - } - }, - "additionalProperties": false, - "required": [ - "name", - "value", - "operator" - ], - "title": "MetricLabelMatcher", - "description": "A matcher for filtering metrics by label values." + "$ref": "#/components/schemas/VectorStoreObject" }, - "description": "The label matchers to apply to the metric." + "description": "List of vector store objects" + }, + "first_id": { + "type": "string", + "description": "(Optional) ID of the first vector store in the list for pagination" + }, + "last_id": { + "type": "string", + "description": "(Optional) ID of the last vector store in the list for pagination" + }, + "has_more": { + "type": "boolean", + "default": false, + "description": "Whether there are more vector stores available beyond this page" } }, "additionalProperties": false, "required": [ - "start_time", - "query_type" + "object", + "data", + "has_more" ], - "title": "QueryMetricsRequest" + "title": "VectorStoreListResponse", + "description": "Response from listing vector stores." }, - "MetricDataPoint": { + "VectorStoreObject": { "type": "object", "properties": { - "timestamp": { + "id": { + "type": "string", + "description": "Unique identifier for the vector store" + }, + "object": { + "type": "string", + "default": "vector_store", + "description": "Object type identifier, always \"vector_store\"" + }, + "created_at": { "type": "integer", - "description": "Unix timestamp when the metric value was recorded" + "description": "Timestamp when the vector store was created" }, - "value": { - "type": "number", - "description": "The numeric value of the metric at this timestamp" + "name": { + "type": "string", + "description": "(Optional) Name of the vector store" }, - "unit": { - "type": "string" + "usage_bytes": { + "type": "integer", + "default": 0, + "description": "Storage space used by the vector store in bytes" + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts", + "description": "File processing status counts for the vector store" + }, + "status": { + "type": "string", + "default": "completed", + "description": "Current status of the vector store" + }, + "expires_after": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) Expiration policy for the vector store" + }, + "expires_at": { + "type": "integer", + "description": "(Optional) Timestamp when the vector store will expire" + }, + "last_active_at": { + "type": "integer", + "description": "(Optional) Timestamp of last activity on the vector store" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Set of key-value pairs that can be attached to the vector store" } }, "additionalProperties": false, "required": [ - "timestamp", - "value", - 
"unit" + "id", + "object", + "created_at", + "usage_bytes", + "file_counts", + "status", + "metadata" ], - "title": "MetricDataPoint", - "description": "A single data point in a metric time series." + "title": "VectorStoreObject", + "description": "OpenAI Vector Store object." }, - "MetricLabel": { + "OpenaiCreateVectorStoreRequest": { "type": "object", "properties": { "name": { "type": "string", - "description": "The name of the label" + "description": "A name for the vector store." }, - "value": { - "type": "string", - "description": "The value of the label" - } - }, - "additionalProperties": false, - "required": [ - "name", - "value" - ], - "title": "MetricLabel", - "description": "A label associated with a metric." - }, - "MetricSeries": { - "type": "object", - "properties": { - "metric": { - "type": "string", - "description": "The name of the metric" - }, - "labels": { - "type": "array", - "items": { - "$ref": "#/components/schemas/MetricLabel" - }, - "description": "List of labels associated with this metric series" - }, - "values": { - "type": "array", - "items": { - "$ref": "#/components/schemas/MetricDataPoint" - }, - "description": "List of data points in chronological order" - } - }, - "additionalProperties": false, - "required": [ - "metric", - "labels", - "values" - ], - "title": "MetricSeries", - "description": "A time series of metric data points." - }, - "QueryMetricsResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/MetricSeries" - }, - "description": "List of metric series matching the query criteria" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "QueryMetricsResponse", - "description": "Response containing metric time series data." - }, - "QueryCondition": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "The attribute key to filter on" - }, - "op": { - "$ref": "#/components/schemas/QueryConditionOp", - "description": "The comparison operator to apply" - }, - "value": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ], - "description": "The value to compare against" - } - }, - "additionalProperties": false, - "required": [ - "key", - "op", - "value" - ], - "title": "QueryCondition", - "description": "A condition for filtering query results." - }, - "QueryConditionOp": { - "type": "string", - "enum": [ - "eq", - "ne", - "gt", - "lt" - ], - "title": "QueryConditionOp", - "description": "Comparison operators for query conditions." - }, - "QuerySpansRequest": { - "type": "object", - "properties": { - "attribute_filters": { - "type": "array", - "items": { - "$ref": "#/components/schemas/QueryCondition" - }, - "description": "The attribute filters to apply to the spans." - }, - "attributes_to_return": { + "file_ids": { "type": "array", "items": { "type": "string" }, - "description": "The attributes to return in the spans." + "description": "A list of File IDs that the vector store should use. Useful for tools like `file_search` that can access files." }, - "max_depth": { - "type": "integer", - "description": "The maximum depth of the tree." 
- } - }, - "additionalProperties": false, - "required": [ - "attribute_filters", - "attributes_to_return" - ], - "title": "QuerySpansRequest" - }, - "QuerySpansResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Span" + "expires_after": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] }, - "description": "List of spans matching the query criteria" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "QuerySpansResponse", - "description": "Response containing a list of spans." - }, - "QueryTracesRequest": { - "type": "object", - "properties": { - "attribute_filters": { - "type": "array", - "items": { - "$ref": "#/components/schemas/QueryCondition" + "description": "The expiration policy for a vector store." + }, + "chunking_strategy": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] }, - "description": "The attribute filters to apply to the traces." - }, - "limit": { - "type": "integer", - "description": "The limit of traces to return." - }, - "offset": { - "type": "integer", - "description": "The offset of the traces to return." - }, - "order_by": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The order by of the traces to return." - } - }, - "additionalProperties": false, - "title": "QueryTracesRequest" - }, - "QueryTracesResponse": { - "type": "object", - "properties": { - "data": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Trace" - }, - "description": "List of traces matching the query criteria" - } - }, - "additionalProperties": false, - "required": [ - "data" - ], - "title": "QueryTracesResponse", - "description": "Response containing a list of traces." - }, - "RegisterBenchmarkRequest": { - "type": "object", - "properties": { - "benchmark_id": { - "type": "string", - "description": "The ID of the benchmark to register." - }, - "dataset_id": { - "type": "string", - "description": "The ID of the dataset to use for the benchmark." - }, - "scoring_functions": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The scoring functions to use for the benchmark." - }, - "provider_benchmark_id": { - "type": "string", - "description": "The ID of the provider benchmark to use for the benchmark." - }, - "provider_id": { - "type": "string", - "description": "The ID of the provider to use for the benchmark." + "description": "The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy." }, "metadata": { "type": "object", @@ -17876,485 +11444,684 @@ } ] }, - "description": "The metadata to use for the benchmark." 
- } - }, - "additionalProperties": false, - "required": [ - "benchmark_id", - "dataset_id", - "scoring_functions" - ], - "title": "RegisterBenchmarkRequest" - }, - "DataSource": { - "oneOf": [ - { - "$ref": "#/components/schemas/URIDataSource" - }, - { - "$ref": "#/components/schemas/RowsDataSource" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "uri": "#/components/schemas/URIDataSource", - "rows": "#/components/schemas/RowsDataSource" - } - } - }, - "RegisterDatasetRequest": { - "type": "object", - "properties": { - "purpose": { - "type": "string", - "enum": [ - "post-training/messages", - "eval/question-answer", - "eval/messages-answer" - ], - "description": "The purpose of the dataset. One of: - \"post-training/messages\": The dataset contains a messages column with list of messages for post-training. { \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}, ] } - \"eval/question-answer\": The dataset contains a question column and an answer column for evaluation. { \"question\": \"What is the capital of France?\", \"answer\": \"Paris\" } - \"eval/messages-answer\": The dataset contains a messages column with list of messages and an answer column for evaluation. { \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, my name is John Doe.\"}, {\"role\": \"assistant\", \"content\": \"Hello, John Doe. How can I help you today?\"}, {\"role\": \"user\", \"content\": \"What's my name?\"}, ], \"answer\": \"John Doe\" }" - }, - "source": { - "$ref": "#/components/schemas/DataSource", - "description": "The data source of the dataset. Ensure that the data source schema is compatible with the purpose of the dataset. Examples: - { \"type\": \"uri\", \"uri\": \"https://mywebsite.com/mydata.jsonl\" } - { \"type\": \"uri\", \"uri\": \"lsfs://mydata.jsonl\" } - { \"type\": \"uri\", \"uri\": \"data:csv;base64,{base64_content}\" } - { \"type\": \"uri\", \"uri\": \"huggingface://llamastack/simpleqa?split=train\" } - { \"type\": \"rows\", \"rows\": [ { \"messages\": [ {\"role\": \"user\", \"content\": \"Hello, world!\"}, {\"role\": \"assistant\", \"content\": \"Hello, world!\"}, ] } ] }" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The metadata for the dataset. - E.g. {\"description\": \"My dataset\"}." - }, - "dataset_id": { - "type": "string", - "description": "The ID of the dataset. If not provided, an ID will be generated." - } - }, - "additionalProperties": false, - "required": [ - "purpose", - "source" - ], - "title": "RegisterDatasetRequest" - }, - "RegisterModelRequest": { - "type": "object", - "properties": { - "model_id": { - "type": "string", - "description": "The identifier of the model to register." - }, - "provider_model_id": { - "type": "string", - "description": "The identifier of the model in the provider." - }, - "provider_id": { - "type": "string", - "description": "The identifier of the provider." - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "Any additional metadata for this model." 
- }, - "model_type": { - "$ref": "#/components/schemas/ModelType", - "description": "The type of model to register." - } - }, - "additionalProperties": false, - "required": [ - "model_id" - ], - "title": "RegisterModelRequest" - }, - "ParamType": { - "oneOf": [ - { - "$ref": "#/components/schemas/StringType" - }, - { - "$ref": "#/components/schemas/NumberType" - }, - { - "$ref": "#/components/schemas/BooleanType" - }, - { - "$ref": "#/components/schemas/ArrayType" - }, - { - "$ref": "#/components/schemas/ObjectType" - }, - { - "$ref": "#/components/schemas/JsonType" - }, - { - "$ref": "#/components/schemas/UnionType" - }, - { - "$ref": "#/components/schemas/ChatCompletionInputType" - }, - { - "$ref": "#/components/schemas/CompletionInputType" - }, - { - "$ref": "#/components/schemas/AgentTurnInputType" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "string": "#/components/schemas/StringType", - "number": "#/components/schemas/NumberType", - "boolean": "#/components/schemas/BooleanType", - "array": "#/components/schemas/ArrayType", - "object": "#/components/schemas/ObjectType", - "json": "#/components/schemas/JsonType", - "union": "#/components/schemas/UnionType", - "chat_completion_input": "#/components/schemas/ChatCompletionInputType", - "completion_input": "#/components/schemas/CompletionInputType", - "agent_turn_input": "#/components/schemas/AgentTurnInputType" - } - } - }, - "RegisterScoringFunctionRequest": { - "type": "object", - "properties": { - "scoring_fn_id": { - "type": "string", - "description": "The ID of the scoring function to register." - }, - "description": { - "type": "string", - "description": "The description of the scoring function." - }, - "return_type": { - "$ref": "#/components/schemas/ParamType", - "description": "The return type of the scoring function." - }, - "provider_scoring_fn_id": { - "type": "string", - "description": "The ID of the provider scoring function to use for the scoring function." - }, - "provider_id": { - "type": "string", - "description": "The ID of the provider to use for the scoring function." - }, - "params": { - "$ref": "#/components/schemas/ScoringFnParams", - "description": "The parameters for the scoring function for benchmark eval, these can be overridden for app eval." - } - }, - "additionalProperties": false, - "required": [ - "scoring_fn_id", - "description", - "return_type" - ], - "title": "RegisterScoringFunctionRequest" - }, - "RegisterShieldRequest": { - "type": "object", - "properties": { - "shield_id": { - "type": "string", - "description": "The identifier of the shield to register." - }, - "provider_shield_id": { - "type": "string", - "description": "The identifier of the shield in the provider." - }, - "provider_id": { - "type": "string", - "description": "The identifier of the provider." - }, - "params": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The parameters of the shield." - } - }, - "additionalProperties": false, - "required": [ - "shield_id" - ], - "title": "RegisterShieldRequest" - }, - "RegisterToolGroupRequest": { - "type": "object", - "properties": { - "toolgroup_id": { - "type": "string", - "description": "The ID of the tool group to register." - }, - "provider_id": { - "type": "string", - "description": "The ID of the provider to use for the tool group." 
- }, - "mcp_endpoint": { - "$ref": "#/components/schemas/URL", - "description": "The MCP endpoint to use for the tool group." - }, - "args": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "A dictionary of arguments to pass to the tool group." - } - }, - "additionalProperties": false, - "required": [ - "toolgroup_id", - "provider_id" - ], - "title": "RegisterToolGroupRequest" - }, - "RegisterVectorDbRequest": { - "type": "object", - "properties": { - "vector_db_id": { - "type": "string", - "description": "The identifier of the vector database to register." + "description": "Set of 16 key-value pairs that can be attached to an object." }, "embedding_model": { "type": "string", - "description": "The embedding model to use." + "description": "The embedding model to use for this vector store." }, "embedding_dimension": { "type": "integer", - "description": "The dimension of the embedding model." + "description": "The dimension of the embedding vectors (default: 384)." }, "provider_id": { "type": "string", - "description": "The identifier of the provider." - }, - "vector_db_name": { - "type": "string", - "description": "The name of the vector database." - }, - "provider_vector_db_id": { - "type": "string", - "description": "The identifier of the vector database in the provider." + "description": "The ID of the provider to use for this vector store." } }, "additionalProperties": false, - "required": [ - "vector_db_id", - "embedding_model" - ], - "title": "RegisterVectorDbRequest" + "title": "OpenaiCreateVectorStoreRequest" }, - "RerankRequest": { + "OpenaiUpdateVectorStoreRequest": { "type": "object", "properties": { - "model": { + "name": { "type": "string", - "description": "The identifier of the reranking model to use." + "description": "The name of the vector store." }, - "query": { - "oneOf": [ - { - "type": "string" - }, - { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" - }, - { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" - } - ], - "description": "The search query to rank items against. Can be a string, text content part, or image content part. The input must not exceed the model's max input token length." - }, - "items": { - "type": "array", - "items": { + "expires_after": { + "type": "object", + "additionalProperties": { "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, { "type": "string" }, { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartTextParam" + "type": "array" }, { - "$ref": "#/components/schemas/OpenAIChatCompletionContentPartImageParam" + "type": "object" } ] }, - "description": "List of items to rerank. Each item can be a string, text content part, or image content part. Each input must not exceed the model's max input token length." + "description": "The expiration policy for a vector store." }, - "max_num_results": { - "type": "integer", - "description": "(Optional) Maximum number of results to return. Default: returns all." + "metadata": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Set of 16 key-value pairs that can be attached to an object." 
+ } + }, + "additionalProperties": false, + "title": "OpenaiUpdateVectorStoreRequest" + }, + "VectorStoreDeleteResponse": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the deleted vector store" + }, + "object": { + "type": "string", + "default": "vector_store.deleted", + "description": "Object type identifier for the deletion response" + }, + "deleted": { + "type": "boolean", + "default": true, + "description": "Whether the deletion operation was successful" } }, "additionalProperties": false, "required": [ - "model", - "query", - "items" + "id", + "object", + "deleted" ], - "title": "RerankRequest" + "title": "VectorStoreDeleteResponse", + "description": "Response from deleting a vector store." }, - "RerankData": { + "VectorStoreChunkingStrategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + } + }, + "VectorStoreChunkingStrategyAuto": { "type": "object", "properties": { - "index": { - "type": "integer", - "description": "The original index of the document in the input list" - }, - "relevance_score": { - "type": "number", - "description": "The relevance score from the model output. Values are inverted when applicable so that higher scores indicate greater relevance." + "type": { + "type": "string", + "const": "auto", + "default": "auto", + "description": "Strategy type, always \"auto\" for automatic chunking" } }, "additionalProperties": false, "required": [ - "index", - "relevance_score" + "type" ], - "title": "RerankData", - "description": "A single rerank result from a reranking response." + "title": "VectorStoreChunkingStrategyAuto", + "description": "Automatic chunking strategy for vector store files." }, - "RerankResponse": { + "VectorStoreChunkingStrategyStatic": { "type": "object", "properties": { + "type": { + "type": "string", + "const": "static", + "default": "static", + "description": "Strategy type, always \"static\" for static chunking" + }, + "static": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStaticConfig", + "description": "Configuration parameters for the static chunking strategy" + } + }, + "additionalProperties": false, + "required": [ + "type", + "static" + ], + "title": "VectorStoreChunkingStrategyStatic", + "description": "Static chunking strategy with configurable parameters." + }, + "VectorStoreChunkingStrategyStaticConfig": { + "type": "object", + "properties": { + "chunk_overlap_tokens": { + "type": "integer", + "default": 400, + "description": "Number of tokens to overlap between adjacent chunks" + }, + "max_chunk_size_tokens": { + "type": "integer", + "default": 800, + "description": "Maximum number of tokens per chunk, must be between 100 and 4096" + } + }, + "additionalProperties": false, + "required": [ + "chunk_overlap_tokens", + "max_chunk_size_tokens" + ], + "title": "VectorStoreChunkingStrategyStaticConfig", + "description": "Configuration for static chunking strategy." + }, + "OpenaiCreateVectorStoreFileBatchRequest": { + "type": "object", + "properties": { + "file_ids": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of File IDs that the vector store should use." 
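A minimal, hypothetical `OpenaiCreateVectorStoreRequest` body; the name, file ID, and metadata are placeholders, and the `chunking_strategy` value is shaped like the `VectorStoreChunkingStrategyStatic` schema above even though the request field itself is declared as a free-form object.

# Hypothetical OpenaiCreateVectorStoreRequest body; names and file IDs are placeholders.
create_vector_store_request = {
    "name": "support-articles",   # optional display name
    "file_ids": ["file-abc123"],  # optional files to attach
    "chunking_strategy": {        # free-form object; shown here in the static-strategy shape
        "type": "static",
        "static": {
            "max_chunk_size_tokens": 800,  # schema allows 100-4096
            "chunk_overlap_tokens": 400,
        },
    },
    "metadata": {"team": "docs"},  # optional, up to 16 key-value pairs
}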
+ }, + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "(Optional) Key-value attributes to store with the files." + }, + "chunking_strategy": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategy", + "description": "(Optional) The chunking strategy used to chunk the file(s). Defaults to auto." + } + }, + "additionalProperties": false, + "required": [ + "file_ids" + ], + "title": "OpenaiCreateVectorStoreFileBatchRequest" + }, + "VectorStoreFileBatchObject": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the file batch" + }, + "object": { + "type": "string", + "default": "vector_store.file_batch", + "description": "Object type identifier, always \"vector_store.file_batch\"" + }, + "created_at": { + "type": "integer", + "description": "Timestamp when the file batch was created" + }, + "vector_store_id": { + "type": "string", + "description": "ID of the vector store containing the file batch" + }, + "status": { + "$ref": "#/components/schemas/VectorStoreFileStatus", + "description": "Current processing status of the file batch" + }, + "file_counts": { + "$ref": "#/components/schemas/VectorStoreFileCounts", + "description": "File processing status counts for the batch" + } + }, + "additionalProperties": false, + "required": [ + "id", + "object", + "created_at", + "vector_store_id", + "status", + "file_counts" + ], + "title": "VectorStoreFileBatchObject", + "description": "OpenAI Vector Store File Batch object." + }, + "VectorStoreFileStatus": { + "oneOf": [ + { + "type": "string", + "const": "completed" + }, + { + "type": "string", + "const": "in_progress" + }, + { + "type": "string", + "const": "cancelled" + }, + { + "type": "string", + "const": "failed" + } + ] + }, + "VectorStoreFileLastError": { + "type": "object", + "properties": { + "code": { + "oneOf": [ + { + "type": "string", + "const": "server_error" + }, + { + "type": "string", + "const": "rate_limit_exceeded" + } + ], + "description": "Error code indicating the type of failure" + }, + "message": { + "type": "string", + "description": "Human-readable error message describing the failure" + } + }, + "additionalProperties": false, + "required": [ + "code", + "message" + ], + "title": "VectorStoreFileLastError", + "description": "Error information for failed vector store file processing." 
+ }, + "VectorStoreFileObject": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier for the file" + }, + "object": { + "type": "string", + "default": "vector_store.file", + "description": "Object type identifier, always \"vector_store.file\"" + }, + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Key-value attributes associated with the file" + }, + "chunking_strategy": { + "oneOf": [ + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyAuto" + }, + { + "$ref": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + ], + "discriminator": { + "propertyName": "type", + "mapping": { + "auto": "#/components/schemas/VectorStoreChunkingStrategyAuto", + "static": "#/components/schemas/VectorStoreChunkingStrategyStatic" + } + }, + "description": "Strategy used for splitting the file into chunks" + }, + "created_at": { + "type": "integer", + "description": "Timestamp when the file was added to the vector store" + }, + "last_error": { + "$ref": "#/components/schemas/VectorStoreFileLastError", + "description": "(Optional) Error information if file processing failed" + }, + "status": { + "$ref": "#/components/schemas/VectorStoreFileStatus", + "description": "Current processing status of the file" + }, + "usage_bytes": { + "type": "integer", + "default": 0, + "description": "Storage space used by this file in bytes" + }, + "vector_store_id": { + "type": "string", + "description": "ID of the vector store containing this file" + } + }, + "additionalProperties": false, + "required": [ + "id", + "object", + "attributes", + "chunking_strategy", + "created_at", + "status", + "usage_bytes", + "vector_store_id" + ], + "title": "VectorStoreFileObject", + "description": "OpenAI Vector Store File object." + }, + "VectorStoreFilesListInBatchResponse": { + "type": "object", + "properties": { + "object": { + "type": "string", + "default": "list", + "description": "Object type identifier, always \"list\"" + }, "data": { "type": "array", "items": { - "$ref": "#/components/schemas/RerankData" + "$ref": "#/components/schemas/VectorStoreFileObject" }, - "description": "List of rerank result objects, sorted by relevance score (descending)" + "description": "List of vector store file objects in the batch" + }, + "first_id": { + "type": "string", + "description": "(Optional) ID of the first file in the list for pagination" + }, + "last_id": { + "type": "string", + "description": "(Optional) ID of the last file in the list for pagination" + }, + "has_more": { + "type": "boolean", + "default": false, + "description": "Whether there are more files available beyond this page" } }, "additionalProperties": false, "required": [ - "data" + "object", + "data", + "has_more" ], - "title": "RerankResponse", - "description": "Response from a reranking request." + "title": "VectorStoreFilesListInBatchResponse", + "description": "Response from listing files in a vector store file batch." 
}, - "ResumeAgentTurnRequest": { + "VectorStoreListFilesResponse": { "type": "object", "properties": { - "tool_responses": { + "object": { + "type": "string", + "default": "list", + "description": "Object type identifier, always \"list\"" + }, + "data": { "type": "array", "items": { - "$ref": "#/components/schemas/ToolResponse" + "$ref": "#/components/schemas/VectorStoreFileObject" }, - "description": "The tool call responses to resume the turn with." + "description": "List of vector store file objects" }, - "stream": { + "first_id": { + "type": "string", + "description": "(Optional) ID of the first file in the list for pagination" + }, + "last_id": { + "type": "string", + "description": "(Optional) ID of the last file in the list for pagination" + }, + "has_more": { "type": "boolean", - "description": "Whether to stream the response." + "default": false, + "description": "Whether there are more files available beyond this page" } }, "additionalProperties": false, "required": [ - "tool_responses" + "object", + "data", + "has_more" ], - "title": "ResumeAgentTurnRequest" + "title": "VectorStoreListFilesResponse", + "description": "Response from listing files in a vector store." }, - "RunEvalRequest": { + "OpenaiAttachFileToVectorStoreRequest": { "type": "object", "properties": { - "benchmark_config": { - "$ref": "#/components/schemas/BenchmarkConfig", - "description": "The configuration for the benchmark." + "file_id": { + "type": "string", + "description": "The ID of the file to attach to the vector store." + }, + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "The key-value attributes stored with the file, which can be used for filtering." + }, + "chunking_strategy": { + "$ref": "#/components/schemas/VectorStoreChunkingStrategy", + "description": "The chunking strategy to use for the file." } }, "additionalProperties": false, "required": [ - "benchmark_config" + "file_id" ], - "title": "RunEvalRequest" + "title": "OpenaiAttachFileToVectorStoreRequest" }, - "RunModerationRequest": { + "OpenaiUpdateVectorStoreFileRequest": { "type": "object", "properties": { - "input": { + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "The updated key-value attributes to store with the file." + } + }, + "additionalProperties": false, + "required": [ + "attributes" + ], + "title": "OpenaiUpdateVectorStoreFileRequest" + }, + "VectorStoreFileDeleteResponse": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "Unique identifier of the deleted file" + }, + "object": { + "type": "string", + "default": "vector_store.file.deleted", + "description": "Object type identifier for the deletion response" + }, + "deleted": { + "type": "boolean", + "default": true, + "description": "Whether the deletion operation was successful" + } + }, + "additionalProperties": false, + "required": [ + "id", + "object", + "deleted" + ], + "title": "VectorStoreFileDeleteResponse", + "description": "Response from deleting a vector store file." 
+ }, + "VectorStoreContent": { + "type": "object", + "properties": { + "type": { + "type": "string", + "const": "text", + "description": "Content type, currently only \"text\" is supported" + }, + "text": { + "type": "string", + "description": "The actual text content" + } + }, + "additionalProperties": false, + "required": [ + "type", + "text" + ], + "title": "VectorStoreContent", + "description": "Content item from a vector store file or search result." + }, + "VectorStoreFileContentsResponse": { + "type": "object", + "properties": { + "file_id": { + "type": "string", + "description": "Unique identifier for the file" + }, + "filename": { + "type": "string", + "description": "Name of the file" + }, + "attributes": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "array" + }, + { + "type": "object" + } + ] + }, + "description": "Key-value attributes associated with the file" + }, + "content": { + "type": "array", + "items": { + "$ref": "#/components/schemas/VectorStoreContent" + }, + "description": "List of content items from the file" + } + }, + "additionalProperties": false, + "required": [ + "file_id", + "filename", + "attributes", + "content" + ], + "title": "VectorStoreFileContentsResponse", + "description": "Response from retrieving the contents of a vector store file." + }, + "OpenaiSearchVectorStoreRequest": { + "type": "object", + "properties": { + "query": { "oneOf": [ { "type": "string" @@ -18366,156 +12133,9 @@ } } ], - "description": "Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models." + "description": "The query string or array for performing the search." }, - "model": { - "type": "string", - "description": "The content moderation model you would like to use." - } - }, - "additionalProperties": false, - "required": [ - "input", - "model" - ], - "title": "RunModerationRequest" - }, - "ModerationObject": { - "type": "object", - "properties": { - "id": { - "type": "string", - "description": "The unique identifier for the moderation request." - }, - "model": { - "type": "string", - "description": "The model used to generate the moderation results." - }, - "results": { - "type": "array", - "items": { - "$ref": "#/components/schemas/ModerationObjectResults" - }, - "description": "A list of moderation objects" - } - }, - "additionalProperties": false, - "required": [ - "id", - "model", - "results" - ], - "title": "ModerationObject", - "description": "A moderation object." - }, - "ModerationObjectResults": { - "type": "object", - "properties": { - "flagged": { - "type": "boolean", - "description": "Whether any of the below categories are flagged." - }, - "categories": { - "type": "object", - "additionalProperties": { - "type": "boolean" - }, - "description": "A list of the categories, and whether they are flagged or not." - }, - "category_applied_input_types": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - } - }, - "description": "A list of the categories along with the input type(s) that the score applies to." - }, - "category_scores": { - "type": "object", - "additionalProperties": { - "type": "number" - }, - "description": "A list of the categories along with their scores as predicted by model." 
- }, - "user_message": { - "type": "string" - }, - "metadata": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - } - }, - "additionalProperties": false, - "required": [ - "flagged", - "metadata" - ], - "title": "ModerationObjectResults", - "description": "A moderation object." - }, - "Message": { - "oneOf": [ - { - "$ref": "#/components/schemas/UserMessage" - }, - { - "$ref": "#/components/schemas/SystemMessage" - }, - { - "$ref": "#/components/schemas/ToolResponseMessage" - }, - { - "$ref": "#/components/schemas/CompletionMessage" - } - ], - "discriminator": { - "propertyName": "role", - "mapping": { - "user": "#/components/schemas/UserMessage", - "system": "#/components/schemas/SystemMessage", - "tool": "#/components/schemas/ToolResponseMessage", - "assistant": "#/components/schemas/CompletionMessage" - } - } - }, - "RunShieldRequest": { - "type": "object", - "properties": { - "shield_id": { - "type": "string", - "description": "The identifier of the shield to run." - }, - "messages": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Message" - }, - "description": "The messages to run the shield on." - }, - "params": { + "filters": { "type": "object", "additionalProperties": { "oneOf": [ @@ -18539,519 +12159,131 @@ } ] }, - "description": "The parameters of the shield." - } - }, - "additionalProperties": false, - "required": [ - "shield_id", - "messages", - "params" - ], - "title": "RunShieldRequest" - }, - "RunShieldResponse": { - "type": "object", - "properties": { - "violation": { - "$ref": "#/components/schemas/SafetyViolation", - "description": "(Optional) Safety violation detected by the shield, if any" - } - }, - "additionalProperties": false, - "title": "RunShieldResponse", - "description": "Response from running a safety shield." - }, - "SaveSpansToDatasetRequest": { - "type": "object", - "properties": { - "attribute_filters": { - "type": "array", - "items": { - "$ref": "#/components/schemas/QueryCondition" - }, - "description": "The attribute filters to apply to the spans." + "description": "Filters based on file attributes to narrow the search results." }, - "attributes_to_save": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The attributes to save to the dataset." - }, - "dataset_id": { - "type": "string", - "description": "The ID of the dataset to save the spans to." - }, - "max_depth": { + "max_num_results": { "type": "integer", - "description": "The maximum depth of the tree." - } - }, - "additionalProperties": false, - "required": [ - "attribute_filters", - "attributes_to_save", - "dataset_id" - ], - "title": "SaveSpansToDatasetRequest" - }, - "ScoreRequest": { - "type": "object", - "properties": { - "input_rows": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] + "description": "Maximum number of results to return (1 to 50 inclusive, default 10)." 
+ }, + "ranking_options": { + "type": "object", + "properties": { + "ranker": { + "type": "string", + "description": "(Optional) Name of the ranking algorithm to use" + }, + "score_threshold": { + "type": "number", + "default": 0.0, + "description": "(Optional) Minimum relevance score threshold for results" } }, - "description": "The rows to score." + "additionalProperties": false, + "description": "Ranking options for fine-tuning the search results." }, - "scoring_functions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/components/schemas/ScoringFnParams" - }, - { - "type": "null" - } - ] - }, - "description": "The scoring functions to use for the scoring." - } - }, - "additionalProperties": false, - "required": [ - "input_rows", - "scoring_functions" - ], - "title": "ScoreRequest" - }, - "ScoreResponse": { - "type": "object", - "properties": { - "results": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/ScoringResult" - }, - "description": "A map of scoring function name to ScoringResult." - } - }, - "additionalProperties": false, - "required": [ - "results" - ], - "title": "ScoreResponse", - "description": "The response from scoring." - }, - "ScoreBatchRequest": { - "type": "object", - "properties": { - "dataset_id": { - "type": "string", - "description": "The ID of the dataset to score." - }, - "scoring_functions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/components/schemas/ScoringFnParams" - }, - { - "type": "null" - } - ] - }, - "description": "The scoring functions to use for the scoring." - }, - "save_results_dataset": { + "rewrite_query": { "type": "boolean", - "description": "Whether to save the results to a dataset." + "description": "Whether to rewrite the natural language query for vector search (default false)" + }, + "search_mode": { + "type": "string", + "description": "The search mode to use - \"keyword\", \"vector\", or \"hybrid\" (default \"vector\")" } }, "additionalProperties": false, "required": [ - "dataset_id", - "scoring_functions", - "save_results_dataset" + "query" ], - "title": "ScoreBatchRequest" + "title": "OpenaiSearchVectorStoreRequest" }, - "ScoreBatchResponse": { + "VectorStoreSearchResponse": { "type": "object", "properties": { - "dataset_id": { + "file_id": { "type": "string", - "description": "(Optional) The identifier of the dataset that was scored" + "description": "Unique identifier of the file containing the result" }, - "results": { + "filename": { + "type": "string", + "description": "Name of the file containing the result" + }, + "score": { + "type": "number", + "description": "Relevance score for this search result" + }, + "attributes": { "type": "object", "additionalProperties": { - "$ref": "#/components/schemas/ScoringResult" + "oneOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "boolean" + } + ] }, - "description": "A map of scoring function name to ScoringResult" - } - }, - "additionalProperties": false, - "required": [ - "results" - ], - "title": "ScoreBatchResponse", - "description": "Response from batch scoring operations on datasets." - }, - "SetDefaultVersionRequest": { - "type": "object", - "properties": { - "version": { - "type": "integer", - "description": "The version to set as default." 
- } - }, - "additionalProperties": false, - "required": [ - "version" - ], - "title": "SetDefaultVersionRequest" - }, - "AlgorithmConfig": { - "oneOf": [ - { - "$ref": "#/components/schemas/LoraFinetuningConfig" + "description": "(Optional) Key-value attributes associated with the file" }, - { - "$ref": "#/components/schemas/QATFinetuningConfig" - } - ], - "discriminator": { - "propertyName": "type", - "mapping": { - "LoRA": "#/components/schemas/LoraFinetuningConfig", - "QAT": "#/components/schemas/QATFinetuningConfig" - } - } - }, - "LoraFinetuningConfig": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "LoRA", - "default": "LoRA", - "description": "Algorithm type identifier, always \"LoRA\"" - }, - "lora_attn_modules": { + "content": { "type": "array", "items": { - "type": "string" + "$ref": "#/components/schemas/VectorStoreContent" }, - "description": "List of attention module names to apply LoRA to" + "description": "List of content items matching the search query" + } + }, + "additionalProperties": false, + "required": [ + "file_id", + "filename", + "score", + "content" + ], + "title": "VectorStoreSearchResponse", + "description": "Response from searching a vector store." + }, + "VectorStoreSearchResponsePage": { + "type": "object", + "properties": { + "object": { + "type": "string", + "default": "vector_store.search_results.page", + "description": "Object type identifier for the search results page" }, - "apply_lora_to_mlp": { - "type": "boolean", - "description": "Whether to apply LoRA to MLP layers" + "search_query": { + "type": "string", + "description": "The original search query that was executed" }, - "apply_lora_to_output": { - "type": "boolean", - "description": "Whether to apply LoRA to output projection layers" + "data": { + "type": "array", + "items": { + "$ref": "#/components/schemas/VectorStoreSearchResponse" + }, + "description": "List of search result objects" }, - "rank": { - "type": "integer", - "description": "Rank of the LoRA adaptation (lower rank = fewer parameters)" - }, - "alpha": { - "type": "integer", - "description": "LoRA scaling parameter that controls adaptation strength" - }, - "use_dora": { + "has_more": { "type": "boolean", "default": false, - "description": "(Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation)" + "description": "Whether there are more results available beyond this page" }, - "quantize_base": { - "type": "boolean", - "default": false, - "description": "(Optional) Whether to quantize the base model weights" + "next_page": { + "type": "string", + "description": "(Optional) Token for retrieving the next page of results" } }, "additionalProperties": false, "required": [ - "type", - "lora_attn_modules", - "apply_lora_to_mlp", - "apply_lora_to_output", - "rank", - "alpha" + "object", + "search_query", + "data", + "has_more" ], - "title": "LoraFinetuningConfig", - "description": "Configuration for Low-Rank Adaptation (LoRA) fine-tuning." 
- }, - "QATFinetuningConfig": { - "type": "object", - "properties": { - "type": { - "type": "string", - "const": "QAT", - "default": "QAT", - "description": "Algorithm type identifier, always \"QAT\"" - }, - "quantizer_name": { - "type": "string", - "description": "Name of the quantization algorithm to use" - }, - "group_size": { - "type": "integer", - "description": "Size of groups for grouped quantization" - } - }, - "additionalProperties": false, - "required": [ - "type", - "quantizer_name", - "group_size" - ], - "title": "QATFinetuningConfig", - "description": "Configuration for Quantization-Aware Training (QAT) fine-tuning." - }, - "SupervisedFineTuneRequest": { - "type": "object", - "properties": { - "job_uuid": { - "type": "string", - "description": "The UUID of the job to create." - }, - "training_config": { - "$ref": "#/components/schemas/TrainingConfig", - "description": "The training configuration." - }, - "hyperparam_search_config": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The hyperparam search configuration." - }, - "logger_config": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "The logger configuration." - }, - "model": { - "type": "string", - "description": "The model to fine-tune." - }, - "checkpoint_dir": { - "type": "string", - "description": "The directory to save checkpoint(s) to." - }, - "algorithm_config": { - "$ref": "#/components/schemas/AlgorithmConfig", - "description": "The algorithm configuration." - } - }, - "additionalProperties": false, - "required": [ - "job_uuid", - "training_config", - "hyperparam_search_config", - "logger_config" - ], - "title": "SupervisedFineTuneRequest" - }, - "SyntheticDataGenerateRequest": { - "type": "object", - "properties": { - "dialogs": { - "type": "array", - "items": { - "$ref": "#/components/schemas/Message" - }, - "description": "List of conversation messages to use as input for synthetic data generation" - }, - "filtering_function": { - "type": "string", - "enum": [ - "none", - "random", - "top_k", - "top_p", - "top_k_top_p", - "sigmoid" - ], - "description": "Type of filtering to apply to generated synthetic data samples" - }, - "model": { - "type": "string", - "description": "(Optional) The identifier of the model to use. 
The model must be registered with Llama Stack and available via the /models endpoint" - } - }, - "additionalProperties": false, - "required": [ - "dialogs", - "filtering_function" - ], - "title": "SyntheticDataGenerateRequest" - }, - "SyntheticDataGenerationResponse": { - "type": "object", - "properties": { - "synthetic_data": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - } - }, - "description": "List of generated synthetic data samples that passed the filtering criteria" - }, - "statistics": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "type": "null" - }, - { - "type": "boolean" - }, - { - "type": "number" - }, - { - "type": "string" - }, - { - "type": "array" - }, - { - "type": "object" - } - ] - }, - "description": "(Optional) Statistical information about the generation process and filtering results" - } - }, - "additionalProperties": false, - "required": [ - "synthetic_data" - ], - "title": "SyntheticDataGenerationResponse", - "description": "Response from the synthetic data generation. Batch of (prompt, response, score) tuples that pass the threshold." - }, - "UpdatePromptRequest": { - "type": "object", - "properties": { - "prompt": { - "type": "string", - "description": "The updated prompt text content." - }, - "version": { - "type": "integer", - "description": "The current version of the prompt being updated." - }, - "variables": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Updated list of variable names that can be used in the prompt template." - }, - "set_as_default": { - "type": "boolean", - "description": "Set the new version as the default (default=True)." - } - }, - "additionalProperties": false, - "required": [ - "prompt", - "version", - "set_as_default" - ], - "title": "UpdatePromptRequest" + "title": "VectorStoreSearchResponsePage", + "description": "Paginated response from searching a vector store." }, "VersionInfo": { "type": "object", @@ -19140,24 +12372,12 @@ "tags": [ { "name": "Agents", - "description": "Main functionalities provided by this API:\n- Create agents with specific instructions and ability to use tools.\n- Interactions with agents are grouped into sessions (\"threads\"), and each interaction is called a \"turn\".\n- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).\n- Agents can be provided with various shields (see the Safety API for more details).\n- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details.", - "x-displayName": "Agents API for creating and interacting with agentic systems." + "description": "APIs for creating and interacting with agentic systems.\n\n## Responses API\n\nThe Responses API provides OpenAI-compatible functionality with enhanced capabilities for dynamic, stateful interactions.\n\n> **✅ STABLE**: This API is production-ready with backward compatibility guarantees. 
Recommended for production applications.\n\n### ✅ Supported Tools\n\nThe Responses API supports the following tool types:\n\n- **`web_search`**: Search the web for current information and real-time data\n- **`file_search`**: Search through uploaded files and vector stores\n - Supports dynamic `vector_store_ids` per call\n - Compatible with OpenAI file search patterns\n- **`function`**: Call custom functions with JSON schema validation\n- **`mcp_tool`**: Model Context Protocol integration\n\n### ✅ Supported Fields & Features\n\n**Core Capabilities:**\n- **Dynamic Configuration**: Switch models, vector stores, and tools per request without pre-configuration\n- **Conversation Branching**: Use `previous_response_id` to branch conversations and explore different paths\n- **Rich Annotations**: Automatic file citations, URL citations, and container file citations\n- **Status Tracking**: Monitor tool call execution status and handle failures gracefully\n\n### 🚧 Work in Progress\n\n- Full real-time response streaming support\n- `tool_choice` parameter\n- `max_tool_calls` parameter\n- Built-in tools (code interpreter, containers API)\n- Safety & guardrails\n- `reasoning` capabilities\n- `service_tier`\n- `logprobs`\n- `max_output_tokens`\n- `metadata` handling\n- `instructions`\n- `incomplete_details`\n- `background`", + "x-displayName": "Agents" }, { - "name": "Benchmarks" - }, - { - "name": "DatasetIO" - }, - { - "name": "Datasets" - }, - { - "name": "Eval", - "x-displayName": "Llama Stack Evaluation API for running evaluations on model and agent candidates." - }, - { - "name": "Files" + "name": "Files", + "description": "" }, { "name": "Inference", @@ -19165,51 +12385,62 @@ "x-displayName": "Llama Stack Inference API for generating completions, chat completions, and embeddings." }, { - "name": "Inspect" + "name": "Inspect", + "description": "" }, { - "name": "Models" - }, - { - "name": "PostTraining (Coming Soon)" + "name": "Models", + "description": "" }, { "name": "Prompts", + "description": "", "x-displayName": "Protocol for prompt management operations." }, { "name": "Providers", + "description": "", "x-displayName": "Providers API for inspecting, listing, and modifying providers and their configurations." 
}, { - "name": "Safety" + "name": "Safety", + "description": "" }, { - "name": "Scoring" + "name": "Scoring", + "description": "" }, { - "name": "ScoringFunctions" + "name": "ScoringFunctions", + "description": "" }, { - "name": "Shields" + "name": "Shields", + "description": "" }, { - "name": "SyntheticDataGeneration (Coming Soon)" + "name": "SyntheticDataGeneration (Coming Soon)", + "description": "" }, { - "name": "Telemetry" + "name": "Telemetry", + "description": "" }, { - "name": "ToolGroups" + "name": "ToolGroups", + "description": "" }, { - "name": "ToolRuntime" + "name": "ToolRuntime", + "description": "" }, { - "name": "VectorDBs" + "name": "VectorDBs", + "description": "" }, { - "name": "VectorIO" + "name": "VectorIO", + "description": "" } ], "x-tagGroups": [ @@ -19217,15 +12448,10 @@ "name": "Operations", "tags": [ "Agents", - "Benchmarks", - "DatasetIO", - "Datasets", - "Eval", "Files", "Inference", "Inspect", "Models", - "PostTraining (Coming Soon)", "Prompts", "Providers", "Safety", diff --git a/docs/static/llama-stack-spec.yaml b/docs/static/llama-stack-spec.yaml index bf8357333..733e2cd21 100644 --- a/docs/static/llama-stack-spec.yaml +++ b/docs/static/llama-stack-spec.yaml @@ -7,2612 +7,11 @@ info: a set of endpoints and their corresponding interfaces that are tailored to best leverage Llama Models. + + **✅ STABLE**: Production-ready APIs with backward compatibility guarantees. servers: - url: http://any-hosted-llama-stack.com paths: - /v1/datasetio/append-rows/{dataset_id}: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - DatasetIO - summary: Append rows to a dataset. - description: Append rows to a dataset. - parameters: - - name: dataset_id - in: path - description: >- - The ID of the dataset to append the rows to. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/AppendRowsRequest' - required: true - /v1alpha/post-training/job/cancel: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Cancel a training job. - description: Cancel a training job. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CancelTrainingJobRequest' - required: true - /v1/post-training/job/cancel: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Cancel a training job. - description: Cancel a training job. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CancelTrainingJobRequest' - required: true - /v1alpha/agents: - get: - responses: - '200': - description: A PaginatedResponse. 
- content: - application/json: - schema: - $ref: '#/components/schemas/PaginatedResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: List all agents. - description: List all agents. - parameters: - - name: start_index - in: query - description: The index to start the pagination from. - required: false - schema: - type: integer - - name: limit - in: query - description: The number of agents to return. - required: false - schema: - type: integer - post: - responses: - '200': - description: >- - An AgentCreateResponse with the agent ID. - content: - application/json: - schema: - $ref: '#/components/schemas/AgentCreateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Create an agent with the given configuration. - description: >- - Create an agent with the given configuration. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAgentRequest' - required: true - /v1/agents: - get: - responses: - '200': - description: A PaginatedResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PaginatedResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: List all agents. - description: List all agents. - parameters: - - name: start_index - in: query - description: The index to start the pagination from. - required: false - schema: - type: integer - - name: limit - in: query - description: The number of agents to return. - required: false - schema: - type: integer - post: - responses: - '200': - description: >- - An AgentCreateResponse with the agent ID. - content: - application/json: - schema: - $ref: '#/components/schemas/AgentCreateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Create an agent with the given configuration. - description: >- - Create an agent with the given configuration. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAgentRequest' - required: true - /v1alpha/agents/{agent_id}/session: - post: - responses: - '200': - description: An AgentSessionCreateResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/AgentSessionCreateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Create a new session for an agent. - description: Create a new session for an agent. - parameters: - - name: agent_id - in: path - description: >- - The ID of the agent to create the session for. 
- required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAgentSessionRequest' - required: true - /v1/agents/{agent_id}/session: - post: - responses: - '200': - description: An AgentSessionCreateResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/AgentSessionCreateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Create a new session for an agent. - description: Create a new session for an agent. - parameters: - - name: agent_id - in: path - description: >- - The ID of the agent to create the session for. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAgentSessionRequest' - required: true - /v1alpha/agents/{agent_id}/session/{session_id}/turn: - post: - responses: - '200': - description: >- - If stream=False, returns a Turn object. If stream=True, returns an SSE - event stream of AgentTurnResponseStreamChunk. - content: - application/json: - schema: - $ref: '#/components/schemas/Turn' - text/event-stream: - schema: - $ref: '#/components/schemas/AgentTurnResponseStreamChunk' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Create a new turn for an agent. - description: Create a new turn for an agent. - parameters: - - name: agent_id - in: path - description: >- - The ID of the agent to create the turn for. - required: true - schema: - type: string - - name: session_id - in: path - description: >- - The ID of the session to create the turn for. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAgentTurnRequest' - required: true - /v1/agents/{agent_id}/session/{session_id}/turn: - post: - responses: - '200': - description: >- - If stream=False, returns a Turn object. If stream=True, returns an SSE - event stream of AgentTurnResponseStreamChunk. - content: - application/json: - schema: - $ref: '#/components/schemas/Turn' - text/event-stream: - schema: - $ref: '#/components/schemas/AgentTurnResponseStreamChunk' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Create a new turn for an agent. - description: Create a new turn for an agent. - parameters: - - name: agent_id - in: path - description: >- - The ID of the agent to create the turn for. - required: true - schema: - type: string - - name: session_id - in: path - description: >- - The ID of the session to create the turn for. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateAgentTurnRequest' - required: true - /v1/responses: - get: - responses: - '200': - description: A ListOpenAIResponseObject. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ListOpenAIResponseObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: List all OpenAI responses. - description: List all OpenAI responses. - parameters: - - name: after - in: query - description: The ID of the last response to return. - required: false - schema: - type: string - - name: limit - in: query - description: The number of responses to return. - required: false - schema: - type: integer - - name: model - in: query - description: The model to filter responses by. - required: false - schema: - type: string - - name: order - in: query - description: >- - The order to sort responses by when sorted by created_at ('asc' or 'desc'). - required: false - schema: - $ref: '#/components/schemas/Order' - post: - responses: - '200': - description: An OpenAIResponseObject. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIResponseObject' - text/event-stream: - schema: - $ref: '#/components/schemas/OpenAIResponseObjectStream' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Create a new OpenAI response. - description: Create a new OpenAI response. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreateOpenaiResponseRequest' - required: true - /v1/prompts: - get: - responses: - '200': - description: >- - A ListPromptsResponse containing all prompts. - content: - application/json: - schema: - $ref: '#/components/schemas/ListPromptsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Prompts - summary: List all prompts. - description: List all prompts. - parameters: [] - post: - responses: - '200': - description: The created Prompt resource. - content: - application/json: - schema: - $ref: '#/components/schemas/Prompt' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Prompts - summary: Create a new prompt. - description: Create a new prompt. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/CreatePromptRequest' - required: true - /v1alpha/agents/{agent_id}: - get: - responses: - '200': - description: An Agent of the agent. - content: - application/json: - schema: - $ref: '#/components/schemas/Agent' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Describe an agent by its ID. - description: Describe an agent by its ID. - parameters: - - name: agent_id - in: path - description: ID of the agent. 
- required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Delete an agent by its ID and its associated sessions and turns. - description: >- - Delete an agent by its ID and its associated sessions and turns. - parameters: - - name: agent_id - in: path - description: The ID of the agent to delete. - required: true - schema: - type: string - /v1/agents/{agent_id}: - get: - responses: - '200': - description: An Agent of the agent. - content: - application/json: - schema: - $ref: '#/components/schemas/Agent' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Describe an agent by its ID. - description: Describe an agent by its ID. - parameters: - - name: agent_id - in: path - description: ID of the agent. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Delete an agent by its ID and its associated sessions and turns. - description: >- - Delete an agent by its ID and its associated sessions and turns. - parameters: - - name: agent_id - in: path - description: The ID of the agent to delete. - required: true - schema: - type: string - /v1alpha/agents/{agent_id}/session/{session_id}: - get: - responses: - '200': - description: A Session. - content: - application/json: - schema: - $ref: '#/components/schemas/Session' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an agent session by its ID. - description: Retrieve an agent session by its ID. - parameters: - - name: session_id - in: path - description: The ID of the session to get. - required: true - schema: - type: string - - name: agent_id - in: path - description: >- - The ID of the agent to get the session for. - required: true - schema: - type: string - - name: turn_ids - in: query - description: >- - (Optional) List of turn IDs to filter the session by. - required: false - schema: - type: array - items: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Delete an agent session by its ID and its associated turns. - description: >- - Delete an agent session by its ID and its associated turns. - parameters: - - name: session_id - in: path - description: The ID of the session to delete. 
- required: true - schema: - type: string - - name: agent_id - in: path - description: >- - The ID of the agent to delete the session for. - required: true - schema: - type: string - /v1/agents/{agent_id}/session/{session_id}: - get: - responses: - '200': - description: A Session. - content: - application/json: - schema: - $ref: '#/components/schemas/Session' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an agent session by its ID. - description: Retrieve an agent session by its ID. - parameters: - - name: session_id - in: path - description: The ID of the session to get. - required: true - schema: - type: string - - name: agent_id - in: path - description: >- - The ID of the agent to get the session for. - required: true - schema: - type: string - - name: turn_ids - in: query - description: >- - (Optional) List of turn IDs to filter the session by. - required: false - schema: - type: array - items: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Delete an agent session by its ID and its associated turns. - description: >- - Delete an agent session by its ID and its associated turns. - parameters: - - name: session_id - in: path - description: The ID of the session to delete. - required: true - schema: - type: string - - name: agent_id - in: path - description: >- - The ID of the agent to delete the session for. - required: true - schema: - type: string - /v1/responses/{response_id}: - get: - responses: - '200': - description: An OpenAIResponseObject. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIResponseObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an OpenAI response by its ID. - description: Retrieve an OpenAI response by its ID. - parameters: - - name: response_id - in: path - description: >- - The ID of the OpenAI response to retrieve. - required: true - schema: - type: string - delete: - responses: - '200': - description: An OpenAIDeleteResponseObject - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIDeleteResponseObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Delete an OpenAI response by its ID. - description: Delete an OpenAI response by its ID. - parameters: - - name: response_id - in: path - description: The ID of the OpenAI response to delete. - required: true - schema: - type: string - /v1/prompts/{prompt_id}: - get: - responses: - '200': - description: A Prompt resource. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Prompt' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Prompts - summary: >- - Get a prompt by its identifier and optional version. - description: >- - Get a prompt by its identifier and optional version. - parameters: - - name: prompt_id - in: path - description: The identifier of the prompt to get. - required: true - schema: - type: string - - name: version - in: query - description: >- - The version of the prompt to get (defaults to latest). - required: false - schema: - type: integer - post: - responses: - '200': - description: >- - The updated Prompt resource with incremented version. - content: - application/json: - schema: - $ref: '#/components/schemas/Prompt' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Prompts - summary: >- - Update an existing prompt (increments version). - description: >- - Update an existing prompt (increments version). - parameters: - - name: prompt_id - in: path - description: The identifier of the prompt to update. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/UpdatePromptRequest' - required: true - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Prompts - summary: Delete a prompt. - description: Delete a prompt. - parameters: - - name: prompt_id - in: path - description: The identifier of the prompt to delete. - required: true - schema: - type: string - /v1alpha/eval/benchmarks/{benchmark_id}/evaluations: - post: - responses: - '200': - description: >- - EvaluateResponse object containing generations and scores. - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Evaluate a list of rows on a benchmark. - description: Evaluate a list of rows on a benchmark. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateRowsRequest' - required: true - /v1/eval/benchmarks/{benchmark_id}/evaluations: - post: - responses: - '200': - description: >- - EvaluateResponse object containing generations and scores. 
- content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Evaluate a list of rows on a benchmark. - description: Evaluate a list of rows on a benchmark. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateRowsRequest' - required: true - /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: - get: - responses: - '200': - description: An AgentStepResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/AgentStepResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an agent step by its ID. - description: Retrieve an agent step by its ID. - parameters: - - name: agent_id - in: path - description: The ID of the agent to get the step for. - required: true - schema: - type: string - - name: session_id - in: path - description: >- - The ID of the session to get the step for. - required: true - schema: - type: string - - name: turn_id - in: path - description: The ID of the turn to get the step for. - required: true - schema: - type: string - - name: step_id - in: path - description: The ID of the step to get. - required: true - schema: - type: string - /v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/step/{step_id}: - get: - responses: - '200': - description: An AgentStepResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/AgentStepResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an agent step by its ID. - description: Retrieve an agent step by its ID. - parameters: - - name: agent_id - in: path - description: The ID of the agent to get the step for. - required: true - schema: - type: string - - name: session_id - in: path - description: >- - The ID of the session to get the step for. - required: true - schema: - type: string - - name: turn_id - in: path - description: The ID of the turn to get the step for. - required: true - schema: - type: string - - name: step_id - in: path - description: The ID of the step to get. - required: true - schema: - type: string - /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}: - get: - responses: - '200': - description: A Turn. - content: - application/json: - schema: - $ref: '#/components/schemas/Turn' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an agent turn by its ID. - description: Retrieve an agent turn by its ID. 
- parameters: - - name: agent_id - in: path - description: The ID of the agent to get the turn for. - required: true - schema: - type: string - - name: session_id - in: path - description: >- - The ID of the session to get the turn for. - required: true - schema: - type: string - - name: turn_id - in: path - description: The ID of the turn to get. - required: true - schema: - type: string - /v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}: - get: - responses: - '200': - description: A Turn. - content: - application/json: - schema: - $ref: '#/components/schemas/Turn' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: Retrieve an agent turn by its ID. - description: Retrieve an agent turn by its ID. - parameters: - - name: agent_id - in: path - description: The ID of the agent to get the turn for. - required: true - schema: - type: string - - name: session_id - in: path - description: >- - The ID of the session to get the turn for. - required: true - schema: - type: string - - name: turn_id - in: path - description: The ID of the turn to get. - required: true - schema: - type: string - /v1alpha/eval/benchmarks/{benchmark_id}: - get: - responses: - '200': - description: A Benchmark. - content: - application/json: - schema: - $ref: '#/components/schemas/Benchmark' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: Get a benchmark by its ID. - description: Get a benchmark by its ID. - parameters: - - name: benchmark_id - in: path - description: The ID of the benchmark to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: Unregister a benchmark. - description: Unregister a benchmark. - parameters: - - name: benchmark_id - in: path - description: The ID of the benchmark to unregister. - required: true - schema: - type: string - /v1/eval/benchmarks/{benchmark_id}: - get: - responses: - '200': - description: A Benchmark. - content: - application/json: - schema: - $ref: '#/components/schemas/Benchmark' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: Get a benchmark by its ID. - description: Get a benchmark by its ID. - parameters: - - name: benchmark_id - in: path - description: The ID of the benchmark to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: Unregister a benchmark. 
- description: Unregister a benchmark. - parameters: - - name: benchmark_id - in: path - description: The ID of the benchmark to unregister. - required: true - schema: - type: string - /v1/chat/completions/{completion_id}: - get: - responses: - '200': - description: A OpenAICompletionWithInputMessages. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAICompletionWithInputMessages' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Inference - summary: Describe a chat completion by its ID. - description: Describe a chat completion by its ID. - parameters: - - name: completion_id - in: path - description: ID of the chat completion. - required: true - schema: - type: string - /v1/datasets/{dataset_id}: - get: - responses: - '200': - description: A Dataset. - content: - application/json: - schema: - $ref: '#/components/schemas/Dataset' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Datasets - summary: Get a dataset by its ID. - description: Get a dataset by its ID. - parameters: - - name: dataset_id - in: path - description: The ID of the dataset to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Datasets - summary: Unregister a dataset by its ID. - description: Unregister a dataset by its ID. - parameters: - - name: dataset_id - in: path - description: The ID of the dataset to unregister. - required: true - schema: - type: string - /v1/models/{model_id}: - get: - responses: - '200': - description: A Model. - content: - application/json: - schema: - $ref: '#/components/schemas/Model' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Models - summary: Get a model by its identifier. - description: Get a model by its identifier. - parameters: - - name: model_id - in: path - description: The identifier of the model to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Models - summary: Unregister a model. - description: Unregister a model. - parameters: - - name: model_id - in: path - description: >- - The identifier of the model to unregister. - required: true - schema: - type: string - /v1/scoring-functions/{scoring_fn_id}: - get: - responses: - '200': - description: A ScoringFn. 
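> Reviewer note: the model registry reads and unregisters above map to a GET and a DELETE on the same path. A short sketch, with an assumed base URL and a placeholder model identifier:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed default; adjust as needed
model_id = "my-model"               # placeholder identifier

# GET /v1/models/{model_id} returns a Model resource
model = requests.get(f"{BASE_URL}/v1/models/{model_id}").json()

# DELETE /v1/models/{model_id} unregisters the model (plain 200 OK on success)
requests.delete(f"{BASE_URL}/v1/models/{model_id}").raise_for_status()
```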
- content: - application/json: - schema: - $ref: '#/components/schemas/ScoringFn' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ScoringFunctions - summary: Get a scoring function by its ID. - description: Get a scoring function by its ID. - parameters: - - name: scoring_fn_id - in: path - description: The ID of the scoring function to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ScoringFunctions - summary: Unregister a scoring function. - description: Unregister a scoring function. - parameters: - - name: scoring_fn_id - in: path - description: >- - The ID of the scoring function to unregister. - required: true - schema: - type: string - /v1/shields/{identifier}: - get: - responses: - '200': - description: A Shield. - content: - application/json: - schema: - $ref: '#/components/schemas/Shield' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Shields - summary: Get a shield by its identifier. - description: Get a shield by its identifier. - parameters: - - name: identifier - in: path - description: The identifier of the shield to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Shields - summary: Unregister a shield. - description: Unregister a shield. - parameters: - - name: identifier - in: path - description: >- - The identifier of the shield to unregister. - required: true - schema: - type: string - /v1/telemetry/traces/{trace_id}/spans/{span_id}: - get: - responses: - '200': - description: A Span. - content: - application/json: - schema: - $ref: '#/components/schemas/Span' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Get a span by its ID. - description: Get a span by its ID. - parameters: - - name: trace_id - in: path - description: >- - The ID of the trace to get the span from. - required: true - schema: - type: string - - name: span_id - in: path - description: The ID of the span to get. - required: true - schema: - type: string - /v1/telemetry/spans/{span_id}/tree: - post: - responses: - '200': - description: A QuerySpanTreeResponse. 
- content: - application/json: - schema: - $ref: '#/components/schemas/QuerySpanTreeResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Get a span tree by its ID. - description: Get a span tree by its ID. - parameters: - - name: span_id - in: path - description: The ID of the span to get the tree from. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/GetSpanTreeRequest' - required: true - /v1/tools/{tool_name}: - get: - responses: - '200': - description: A Tool. - content: - application/json: - schema: - $ref: '#/components/schemas/Tool' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolGroups - summary: Get a tool by its name. - description: Get a tool by its name. - parameters: - - name: tool_name - in: path - description: The name of the tool to get. - required: true - schema: - type: string - /v1/toolgroups/{toolgroup_id}: - get: - responses: - '200': - description: A ToolGroup. - content: - application/json: - schema: - $ref: '#/components/schemas/ToolGroup' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolGroups - summary: Get a tool group by its ID. - description: Get a tool group by its ID. - parameters: - - name: toolgroup_id - in: path - description: The ID of the tool group to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolGroups - summary: Unregister a tool group. - description: Unregister a tool group. - parameters: - - name: toolgroup_id - in: path - description: The ID of the tool group to unregister. - required: true - schema: - type: string - /v1/telemetry/traces/{trace_id}: - get: - responses: - '200': - description: A Trace. - content: - application/json: - schema: - $ref: '#/components/schemas/Trace' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Get a trace by its ID. - description: Get a trace by its ID. - parameters: - - name: trace_id - in: path - description: The ID of the trace to get. - required: true - schema: - type: string - /v1alpha/post-training/job/artifacts: - get: - responses: - '200': - description: A PostTrainingJobArtifactsResponse. 
- content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJobArtifactsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Get the artifacts of a training job. - description: Get the artifacts of a training job. - parameters: - - name: job_uuid - in: query - description: >- - The UUID of the job to get the artifacts of. - required: true - schema: - type: string - /v1/post-training/job/artifacts: - get: - responses: - '200': - description: A PostTrainingJobArtifactsResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJobArtifactsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Get the artifacts of a training job. - description: Get the artifacts of a training job. - parameters: - - name: job_uuid - in: query - description: >- - The UUID of the job to get the artifacts of. - required: true - schema: - type: string - /v1alpha/post-training/job/status: - get: - responses: - '200': - description: A PostTrainingJobStatusResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJobStatusResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Get the status of a training job. - description: Get the status of a training job. - parameters: - - name: job_uuid - in: query - description: >- - The UUID of the job to get the status of. - required: true - schema: - type: string - /v1/post-training/job/status: - get: - responses: - '200': - description: A PostTrainingJobStatusResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJobStatusResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Get the status of a training job. - description: Get the status of a training job. - parameters: - - name: job_uuid - in: query - description: >- - The UUID of the job to get the status of. - required: true - schema: - type: string - /v1alpha/post-training/jobs: - get: - responses: - '200': - description: A ListPostTrainingJobsResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/ListPostTrainingJobsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Get all training jobs. - description: Get all training jobs. 
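> Reviewer note: the post-training status and artifacts endpoints above both key off a required `job_uuid` query parameter rather than a path parameter. A minimal polling sketch, assuming a local server and a placeholder UUID:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed; adjust for your deployment
job_uuid = "0f5c-example-uuid"      # placeholder job UUID

# GET /v1alpha/post-training/job/status?job_uuid=... returns a PostTrainingJobStatusResponse
status = requests.get(
    f"{BASE_URL}/v1alpha/post-training/job/status", params={"job_uuid": job_uuid}
).json()

# GET /v1alpha/post-training/job/artifacts?job_uuid=... returns a PostTrainingJobArtifactsResponse
artifacts = requests.get(
    f"{BASE_URL}/v1alpha/post-training/job/artifacts", params={"job_uuid": job_uuid}
).json()
```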
- parameters: [] - /v1/post-training/jobs: - get: - responses: - '200': - description: A ListPostTrainingJobsResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/ListPostTrainingJobsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Get all training jobs. - description: Get all training jobs. - parameters: [] - /v1/vector-dbs/{vector_db_id}: - get: - responses: - '200': - description: A VectorDB. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorDB' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorDBs - summary: Get a vector database by its identifier. - description: Get a vector database by its identifier. - parameters: - - name: vector_db_id - in: path - description: >- - The identifier of the vector database to get. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorDBs - summary: Unregister a vector database. - description: Unregister a vector database. - parameters: - - name: vector_db_id - in: path - description: >- - The identifier of the vector database to unregister. - required: true - schema: - type: string - /v1/health: - get: - responses: - '200': - description: >- - Health information indicating if the service is operational. - content: - application/json: - schema: - $ref: '#/components/schemas/HealthInfo' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Inspect - summary: >- - Get the current health status of the service. - description: >- - Get the current health status of the service. - parameters: [] - /v1/tool-runtime/rag-tool/insert: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolRuntime - summary: >- - Index documents so they can be used by the RAG system. - description: >- - Index documents so they can be used by the RAG system. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/InsertRequest' - required: true - /v1/vector-io/insert: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Insert chunks into a vector database. - description: Insert chunks into a vector database. 
- parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/InsertChunksRequest' - required: true - /v1/providers/{provider_id}: - get: - responses: - '200': - description: >- - A ProviderInfo object containing the provider's details. - content: - application/json: - schema: - $ref: '#/components/schemas/ProviderInfo' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Providers - summary: >- - Get detailed information about a specific provider. - description: >- - Get detailed information about a specific provider. - parameters: - - name: provider_id - in: path - description: The ID of the provider to inspect. - required: true - schema: - type: string - /v1/tool-runtime/invoke: - post: - responses: - '200': - description: A ToolInvocationResult. - content: - application/json: - schema: - $ref: '#/components/schemas/ToolInvocationResult' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolRuntime - summary: Run a tool with the given arguments. - description: Run a tool with the given arguments. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/InvokeToolRequest' - required: true - /v1/datasetio/iterrows/{dataset_id}: - get: - responses: - '200': - description: A PaginatedResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PaginatedResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - DatasetIO - summary: >- - Get a paginated list of rows from a dataset. - description: >- - Get a paginated list of rows from a dataset. - - Uses offset-based pagination where: - - - start_index: The starting index (0-based). If None, starts from beginning. - - - limit: Number of items to return. If None or -1, returns all items. - - - The response includes: - - - data: List of items for the current page. - - - has_more: Whether there are more items available after this set. - parameters: - - name: dataset_id - in: path - description: >- - The ID of the dataset to get the rows from. - required: true - schema: - type: string - - name: start_index - in: query - description: >- - Index into dataset for the first row to get. Get all rows if None. - required: false - schema: - type: integer - - name: limit - in: query - description: The number of rows to get. - required: false - schema: - type: integer - /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}: - get: - responses: - '200': - description: The status of the evaluation job. - content: - application/json: - schema: - $ref: '#/components/schemas/Job' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Get the status of a job. - description: Get the status of a job. 
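> Reviewer note: the `iterrows` description above spells out the offset-based pagination contract (`start_index`/`limit` in, `data`/`has_more` out). The loop below shows that contract end to end; the base URL and dataset ID are placeholders, not values from the spec.

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed; adjust for your deployment
dataset_id = "my-dataset"           # placeholder dataset ID

rows, start_index = [], 0
while True:
    # GET /v1/datasetio/iterrows/{dataset_id} returns a PaginatedResponse
    page = requests.get(
        f"{BASE_URL}/v1/datasetio/iterrows/{dataset_id}",
        params={"start_index": start_index, "limit": 100},
    ).json()
    rows.extend(page["data"])
    if not page["has_more"]:      # stop once the server reports no further items
        break
    start_index += len(page["data"])
```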
- parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - - name: job_id - in: path - description: The ID of the job to get the status of. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Cancel a job. - description: Cancel a job. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - - name: job_id - in: path - description: The ID of the job to cancel. - required: true - schema: - type: string - /v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}: - get: - responses: - '200': - description: The status of the evaluation job. - content: - application/json: - schema: - $ref: '#/components/schemas/Job' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Get the status of a job. - description: Get the status of a job. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - - name: job_id - in: path - description: The ID of the job to get the status of. - required: true - schema: - type: string - delete: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Cancel a job. - description: Cancel a job. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - - name: job_id - in: path - description: The ID of the job to cancel. - required: true - schema: - type: string - /v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result: - get: - responses: - '200': - description: The result of the job. - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Get the result of a job. - description: Get the result of a job. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - - name: job_id - in: path - description: The ID of the job to get the result of. - required: true - schema: - type: string - /v1/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result: - get: - responses: - '200': - description: The result of the job. 
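> Reviewer note: the evaluation job lifecycle above is status (GET on the job), result (GET on `/result`), and cancel (DELETE on the job). A hedged sketch against the v1alpha paths, with placeholder IDs and an assumed base URL:

```python
import requests

BASE_URL = "http://localhost:8321"          # assumed; adjust for your deployment
benchmark_id, job_id = "bench-1", "job-42"  # placeholders

# GET .../jobs/{job_id} returns the Job status object
status = requests.get(
    f"{BASE_URL}/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}"
).json()

# GET .../jobs/{job_id}/result returns the EvaluateResponse once the job has finished
result = requests.get(
    f"{BASE_URL}/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}/result"
).json()

# DELETE .../jobs/{job_id} cancels a running job
# requests.delete(f"{BASE_URL}/v1alpha/eval/benchmarks/{benchmark_id}/jobs/{job_id}")
```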
- content: - application/json: - schema: - $ref: '#/components/schemas/EvaluateResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Get the result of a job. - description: Get the result of a job. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - - name: job_id - in: path - description: The ID of the job to get the result of. - required: true - schema: - type: string - /v1alpha/agents/{agent_id}/sessions: - get: - responses: - '200': - description: A PaginatedResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PaginatedResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: List all session(s) of a given agent. - description: List all session(s) of a given agent. - parameters: - - name: agent_id - in: path - description: >- - The ID of the agent to list sessions for. - required: true - schema: - type: string - - name: start_index - in: query - description: The index to start the pagination from. - required: false - schema: - type: integer - - name: limit - in: query - description: The number of sessions to return. - required: false - schema: - type: integer - /v1/agents/{agent_id}/sessions: - get: - responses: - '200': - description: A PaginatedResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/PaginatedResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: List all session(s) of a given agent. - description: List all session(s) of a given agent. - parameters: - - name: agent_id - in: path - description: >- - The ID of the agent to list sessions for. - required: true - schema: - type: string - - name: start_index - in: query - description: The index to start the pagination from. - required: false - schema: - type: integer - - name: limit - in: query - description: The number of sessions to return. - required: false - schema: - type: integer - /v1alpha/eval/benchmarks: - get: - responses: - '200': - description: A ListBenchmarksResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/ListBenchmarksResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: List all benchmarks. - description: List all benchmarks. - parameters: [] - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: Register a benchmark. 
- description: Register a benchmark. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterBenchmarkRequest' - required: true - /v1/eval/benchmarks: - get: - responses: - '200': - description: A ListBenchmarksResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/ListBenchmarksResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: List all benchmarks. - description: List all benchmarks. - parameters: [] - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Benchmarks - summary: Register a benchmark. - description: Register a benchmark. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RegisterBenchmarkRequest' - required: true /v1/chat/completions: get: responses: @@ -2665,6 +64,7 @@ paths: required: false schema: $ref: '#/components/schemas/Order' + deprecated: false post: responses: '200': @@ -2700,15 +100,16 @@ paths: schema: $ref: '#/components/schemas/OpenaiChatCompletionRequest' required: true - /v1/datasets: + deprecated: false + /v1/chat/completions/{completion_id}: get: responses: '200': - description: A ListDatasetsResponse. + description: A OpenAICompletionWithInputMessages. content: application/json: schema: - $ref: '#/components/schemas/ListDatasetsResponse' + $ref: '#/components/schemas/OpenAICompletionWithInputMessages' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -2720,18 +121,26 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Datasets - summary: List all datasets. - description: List all datasets. - parameters: [] + - Inference + summary: Describe a chat completion by its ID. + description: Describe a chat completion by its ID. + parameters: + - name: completion_id + in: path + description: ID of the chat completion. + required: true + schema: + type: string + deprecated: false + /v1/completions: post: responses: '200': - description: A Dataset. + description: An OpenAICompletion. content: application/json: schema: - $ref: '#/components/schemas/Dataset' + $ref: '#/components/schemas/OpenAICompletion' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -2743,16 +152,328 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Datasets - summary: Register a new dataset. - description: Register a new dataset. + - Inference + summary: >- + Generate an OpenAI-compatible completion for the given prompt using the specified + model. + description: >- + Generate an OpenAI-compatible completion for the given prompt using the specified + model. parameters: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/RegisterDatasetRequest' + $ref: '#/components/schemas/OpenaiCompletionRequest' required: true + deprecated: false + /v1/embeddings: + post: + responses: + '200': + description: >- + An OpenAIEmbeddingsResponse containing the embeddings. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIEmbeddingsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inference + summary: >- + Generate OpenAI-compatible embeddings for the given input using the specified + model. + description: >- + Generate OpenAI-compatible embeddings for the given input using the specified + model. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/OpenaiEmbeddingsRequest' + required: true + deprecated: false + /v1/files: + get: + responses: + '200': + description: >- + An ListOpenAIFileResponse containing the list of files. + content: + application/json: + schema: + $ref: '#/components/schemas/ListOpenAIFileResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Files + summary: >- + Returns a list of files that belong to the user's organization. + description: >- + Returns a list of files that belong to the user's organization. + parameters: + - name: after + in: query + description: >- + A cursor for use in pagination. `after` is an object ID that defines your + place in the list. For instance, if you make a list request and receive + 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo + in order to fetch the next page of the list. + required: false + schema: + type: string + - name: limit + in: query + description: >- + A limit on the number of objects to be returned. Limit can range between + 1 and 10,000, and the default is 10,000. + required: false + schema: + type: integer + - name: order + in: query + description: >- + Sort order by the `created_at` timestamp of the objects. `asc` for ascending + order and `desc` for descending order. + required: false + schema: + $ref: '#/components/schemas/Order' + - name: purpose + in: query + description: >- + Only return files with the given purpose. + required: false + schema: + $ref: '#/components/schemas/OpenAIFilePurpose' + deprecated: false + post: + responses: + '200': + description: >- + An OpenAIFileObject representing the uploaded file. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFileObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Files + summary: >- + Upload a file that can be used across various endpoints. + description: >- + Upload a file that can be used across various endpoints. + + The file upload should be a multipart form request with: + + - file: The File object (not file name) to be uploaded. + + - purpose: The intended purpose of the uploaded file. + + - expires_after: Optional form values describing expiration for the file. 
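> Reviewer note: the upload description above enumerates the multipart fields (`file`, `purpose`, optional `expires_after`), and listing uses cursor pagination via `after` with optional `limit`, `order`, and `purpose`. The sketch below exercises both; the base URL and the `assistants` purpose value are illustrative assumptions.

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed; adjust for your deployment

# POST /v1/files: multipart form with the required `file` and `purpose` fields
with open("report.txt", "rb") as f:
    uploaded = requests.post(
        f"{BASE_URL}/v1/files",
        files={"file": f},
        data={"purpose": "assistants"},  # illustrative purpose value
    ).json()

# GET /v1/files: cursor pagination via `after`, plus optional `limit`, `order`, `purpose`
page = requests.get(
    f"{BASE_URL}/v1/files",
    params={"limit": 100, "order": "desc", "purpose": "assistants"},
).json()
```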
+ parameters: [] + requestBody: + content: + multipart/form-data: + schema: + type: object + properties: + file: + type: string + format: binary + purpose: + $ref: '#/components/schemas/OpenAIFilePurpose' + expires_after: + $ref: '#/components/schemas/ExpiresAfter' + required: + - file + - purpose + required: true + deprecated: false + /v1/files/{file_id}: + get: + responses: + '200': + description: >- + An OpenAIFileObject containing file information. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFileObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Files + summary: >- + Returns information about a specific file. + description: >- + Returns information about a specific file. + parameters: + - name: file_id + in: path + description: >- + The ID of the file to use for this request. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: >- + An OpenAIFileDeleteResponse indicating successful deletion. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIFileDeleteResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Files + summary: Delete a file. + description: Delete a file. + parameters: + - name: file_id + in: path + description: >- + The ID of the file to use for this request. + required: true + schema: + type: string + deprecated: false + /v1/files/{file_id}/content: + get: + responses: + '200': + description: >- + The raw file content as a binary response. + content: + application/json: + schema: + $ref: '#/components/schemas/Response' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Files + summary: >- + Returns the contents of the specified file. + description: >- + Returns the contents of the specified file. + parameters: + - name: file_id + in: path + description: >- + The ID of the file to use for this request. + required: true + schema: + type: string + deprecated: false + /v1/health: + get: + responses: + '200': + description: >- + Health information indicating if the service is operational. + content: + application/json: + schema: + $ref: '#/components/schemas/HealthInfo' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inspect + summary: >- + Get the current health status of the service. + description: >- + Get the current health status of the service. + parameters: [] + deprecated: false + /v1/inspect/routes: + get: + responses: + '200': + description: >- + Response containing information about all available routes. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/ListRoutesResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Inspect + summary: >- + List all available API routes with their methods and implementing providers. + description: >- + List all available API routes with their methods and implementing providers. + parameters: [] + deprecated: false /v1/models: get: responses: @@ -2777,6 +498,7 @@ paths: summary: List all models. description: List all models. parameters: [] + deprecated: false post: responses: '200': @@ -2806,6 +528,537 @@ paths: schema: $ref: '#/components/schemas/RegisterModelRequest' required: true + deprecated: false + /v1/models/{model_id}: + get: + responses: + '200': + description: A Model. + content: + application/json: + schema: + $ref: '#/components/schemas/Model' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Models + summary: Get a model by its identifier. + description: Get a model by its identifier. + parameters: + - name: model_id + in: path + description: The identifier of the model to get. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Models + summary: Unregister a model. + description: Unregister a model. + parameters: + - name: model_id + in: path + description: >- + The identifier of the model to unregister. + required: true + schema: + type: string + deprecated: false + /v1/moderations: + post: + responses: + '200': + description: A moderation object. + content: + application/json: + schema: + $ref: '#/components/schemas/ModerationObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Safety + summary: >- + Classifies if text and/or image inputs are potentially harmful. + description: >- + Classifies if text and/or image inputs are potentially harmful. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RunModerationRequest' + required: true + deprecated: false + /v1/prompts: + get: + responses: + '200': + description: >- + A ListPromptsResponse containing all prompts. + content: + application/json: + schema: + $ref: '#/components/schemas/ListPromptsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: List all prompts. + description: List all prompts. + parameters: [] + deprecated: false + post: + responses: + '200': + description: The created Prompt resource. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: Create a new prompt. + description: Create a new prompt. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreatePromptRequest' + required: true + deprecated: false + /v1/prompts/{prompt_id}: + get: + responses: + '200': + description: A Prompt resource. + content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: >- + Get a prompt by its identifier and optional version. + description: >- + Get a prompt by its identifier and optional version. + parameters: + - name: prompt_id + in: path + description: The identifier of the prompt to get. + required: true + schema: + type: string + - name: version + in: query + description: >- + The version of the prompt to get (defaults to latest). + required: false + schema: + type: integer + deprecated: false + post: + responses: + '200': + description: >- + The updated Prompt resource with incremented version. + content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: >- + Update an existing prompt (increments version). + description: >- + Update an existing prompt (increments version). + parameters: + - name: prompt_id + in: path + description: The identifier of the prompt to update. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/UpdatePromptRequest' + required: true + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: Delete a prompt. + description: Delete a prompt. + parameters: + - name: prompt_id + in: path + description: The identifier of the prompt to delete. + required: true + schema: + type: string + deprecated: false + /v1/prompts/{prompt_id}/set-default-version: + post: + responses: + '200': + description: >- + The prompt with the specified version now set as default. + content: + application/json: + schema: + $ref: '#/components/schemas/Prompt' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: >- + Set which version of a prompt should be the default in get_prompt (latest). + description: >- + Set which version of a prompt should be the default in get_prompt (latest). 
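> Reviewer note: for the prompt read path above, omitting the optional `version` query parameter returns the latest (or default) version, while passing an integer pins a specific one; updates via POST increment the version. A minimal sketch with placeholder values:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed; adjust for your deployment
prompt_id = "prompt-abc"            # placeholder identifier

# Latest version (default when `version` is omitted)
latest = requests.get(f"{BASE_URL}/v1/prompts/{prompt_id}").json()

# A specific version via the optional `version` query parameter
v2 = requests.get(f"{BASE_URL}/v1/prompts/{prompt_id}", params={"version": 2}).json()
```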
+ parameters: + - name: prompt_id + in: path + description: The identifier of the prompt. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SetDefaultVersionRequest' + required: true + deprecated: false + /v1/prompts/{prompt_id}/versions: + get: + responses: + '200': + description: >- + A ListPromptsResponse containing all versions of the prompt. + content: + application/json: + schema: + $ref: '#/components/schemas/ListPromptsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Prompts + summary: List all versions of a specific prompt. + description: List all versions of a specific prompt. + parameters: + - name: prompt_id + in: path + description: >- + The identifier of the prompt to list versions for. + required: true + schema: + type: string + deprecated: false + /v1/providers: + get: + responses: + '200': + description: >- + A ListProvidersResponse containing information about all providers. + content: + application/json: + schema: + $ref: '#/components/schemas/ListProvidersResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Providers + summary: List all available providers. + description: List all available providers. + parameters: [] + deprecated: false + /v1/providers/{provider_id}: + get: + responses: + '200': + description: >- + A ProviderInfo object containing the provider's details. + content: + application/json: + schema: + $ref: '#/components/schemas/ProviderInfo' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Providers + summary: >- + Get detailed information about a specific provider. + description: >- + Get detailed information about a specific provider. + parameters: + - name: provider_id + in: path + description: The ID of the provider to inspect. + required: true + schema: + type: string + deprecated: false + /v1/responses: + get: + responses: + '200': + description: A ListOpenAIResponseObject. + content: + application/json: + schema: + $ref: '#/components/schemas/ListOpenAIResponseObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: List all OpenAI responses. + description: List all OpenAI responses. + parameters: + - name: after + in: query + description: The ID of the last response to return. + required: false + schema: + type: string + - name: limit + in: query + description: The number of responses to return. + required: false + schema: + type: integer + - name: model + in: query + description: The model to filter responses by. + required: false + schema: + type: string + - name: order + in: query + description: >- + The order to sort responses by when sorted by created_at ('asc' or 'desc'). 
+ required: false + schema: + $ref: '#/components/schemas/Order' + deprecated: false + post: + responses: + '200': + description: An OpenAIResponseObject. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIResponseObject' + text/event-stream: + schema: + $ref: '#/components/schemas/OpenAIResponseObjectStream' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Create a new OpenAI response. + description: Create a new OpenAI response. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateOpenaiResponseRequest' + required: true + deprecated: false + /v1/responses/{response_id}: + get: + responses: + '200': + description: An OpenAIResponseObject. + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIResponseObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Retrieve an OpenAI response by its ID. + description: Retrieve an OpenAI response by its ID. + parameters: + - name: response_id + in: path + description: >- + The ID of the OpenAI response to retrieve. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: An OpenAIDeleteResponseObject + content: + application/json: + schema: + $ref: '#/components/schemas/OpenAIDeleteResponseObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Agents + summary: Delete an OpenAI response by its ID. + description: Delete an OpenAI response by its ID. + parameters: + - name: response_id + in: path + description: The ID of the OpenAI response to delete. + required: true + schema: + type: string + deprecated: false /v1/responses/{response_id}/input_items: get: responses: @@ -2877,16 +1130,16 @@ paths: required: false schema: $ref: '#/components/schemas/Order' - /v1/prompts/{prompt_id}/versions: - get: + deprecated: false + /v1/safety/run-shield: + post: responses: '200': - description: >- - A ListPromptsResponse containing all versions of the prompt. + description: A RunShieldResponse. content: application/json: schema: - $ref: '#/components/schemas/ListPromptsResponse' + $ref: '#/components/schemas/RunShieldResponse' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -2898,27 +1151,103 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Prompts - summary: List all versions of a specific prompt. - description: List all versions of a specific prompt. + - Safety + summary: Run a shield. + description: Run a shield. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RunShieldRequest' + required: true + deprecated: false + /v1/scoring-functions: + get: + responses: + '200': + description: A ListScoringFunctionsResponse. 
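> Reviewer note: the responses listing above accepts optional `after`, `limit`, `model`, and `order` filters, and individual responses are addressed by ID for retrieval or deletion. A hedged sketch; the base URL, model name, and response ID are placeholders:

```python
import requests

BASE_URL = "http://localhost:8321"  # assumed; adjust for your deployment

# GET /v1/responses: newest first, filtered to one model (all parameters optional)
listing = requests.get(
    f"{BASE_URL}/v1/responses",
    params={"model": "my-model", "order": "desc", "limit": 20},  # placeholder model ID
).json()

# GET /v1/responses/{response_id} retrieves a single response; DELETE removes it
response_id = "resp-123"  # placeholder
detail = requests.get(f"{BASE_URL}/v1/responses/{response_id}").json()
# requests.delete(f"{BASE_URL}/v1/responses/{response_id}")
```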
+ content: + application/json: + schema: + $ref: '#/components/schemas/ListScoringFunctionsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ScoringFunctions + summary: List all scoring functions. + description: List all scoring functions. + parameters: [] + deprecated: false + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ScoringFunctions + summary: Register a scoring function. + description: Register a scoring function. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterScoringFunctionRequest' + required: true + deprecated: false + /v1/scoring-functions/{scoring_fn_id}: + get: + responses: + '200': + description: A ScoringFn. + content: + application/json: + schema: + $ref: '#/components/schemas/ScoringFn' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ScoringFunctions + summary: Get a scoring function by its ID. + description: Get a scoring function by its ID. parameters: - - name: prompt_id + - name: scoring_fn_id in: path - description: >- - The identifier of the prompt to list versions for. + description: The ID of the scoring function to get. required: true schema: type: string - /v1/providers: - get: + deprecated: false + delete: responses: '200': - description: >- - A ListProvidersResponse containing information about all providers. - content: - application/json: - schema: - $ref: '#/components/schemas/ListProvidersResponse' + description: OK '400': $ref: '#/components/responses/BadRequest400' '429': @@ -2930,20 +1259,59 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Providers - summary: List all available providers. - description: List all available providers. + - ScoringFunctions + summary: Unregister a scoring function. + description: Unregister a scoring function. + parameters: + - name: scoring_fn_id + in: path + description: >- + The ID of the scoring function to unregister. + required: true + schema: + type: string + deprecated: false + /v1/scoring/score: + post: + responses: + '200': + description: >- + A ScoreResponse object containing rows and aggregated results. + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Scoring + summary: Score a list of rows. + description: Score a list of rows. parameters: [] - /v1/inspect/routes: - get: + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreRequest' + required: true + deprecated: false + /v1/scoring/score-batch: + post: responses: '200': - description: >- - Response containing information about all available routes. 
+ description: A ScoreBatchResponse. content: application/json: schema: - $ref: '#/components/schemas/ListRoutesResponse' + $ref: '#/components/schemas/ScoreBatchResponse' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -2955,12 +1323,222 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Inspect + - Scoring + summary: Score a batch of rows. + description: Score a batch of rows. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/ScoreBatchRequest' + required: true + deprecated: false + /v1/shields: + get: + responses: + '200': + description: A ListShieldsResponse. + content: + application/json: + schema: + $ref: '#/components/schemas/ListShieldsResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Shields + summary: List all shields. + description: List all shields. + parameters: [] + deprecated: false + post: + responses: + '200': + description: A Shield. + content: + application/json: + schema: + $ref: '#/components/schemas/Shield' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Shields + summary: Register a shield. + description: Register a shield. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/RegisterShieldRequest' + required: true + deprecated: false + /v1/shields/{identifier}: + get: + responses: + '200': + description: A Shield. + content: + application/json: + schema: + $ref: '#/components/schemas/Shield' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Shields + summary: Get a shield by its identifier. + description: Get a shield by its identifier. + parameters: + - name: identifier + in: path + description: The identifier of the shield to get. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Shields + summary: Unregister a shield. + description: Unregister a shield. + parameters: + - name: identifier + in: path + description: >- + The identifier of the shield to unregister. 
+ required: true + schema: + type: string + deprecated: false + /v1/synthetic-data-generation/generate: + post: + responses: + '200': + description: >- + Response containing filtered synthetic data samples and optional statistics + content: + application/json: + schema: + $ref: '#/components/schemas/SyntheticDataGenerationResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - SyntheticDataGeneration (Coming Soon) summary: >- - List all available API routes with their methods and implementing providers. + Generate synthetic data based on input dialogs and apply filtering. description: >- - List all available API routes with their methods and implementing providers. + Generate synthetic data based on input dialogs and apply filtering. parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/SyntheticDataGenerateRequest' + required: true + deprecated: false + /v1/telemetry/events: + post: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - Telemetry + summary: Log an event. + description: Log an event. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LogEventRequest' + required: true + deprecated: false + /v1/tool-runtime/invoke: + post: + responses: + '200': + description: A ToolInvocationResult. + content: + application/json: + schema: + $ref: '#/components/schemas/ToolInvocationResult' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolRuntime + summary: Run a tool with the given arguments. + description: Run a tool with the given arguments. + parameters: [] + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/InvokeToolRequest' + required: true + deprecated: false /v1/tool-runtime/list-tools: get: responses: @@ -2999,30 +1577,8 @@ paths: required: false schema: $ref: '#/components/schemas/URL' - /v1/scoring-functions: - get: - responses: - '200': - description: A ListScoringFunctionsResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/ListScoringFunctionsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ScoringFunctions - summary: List all scoring functions. - description: List all scoring functions. - parameters: [] + deprecated: false + /v1/tool-runtime/rag-tool/insert: post: responses: '200': @@ -3038,48 +1594,29 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - ScoringFunctions - summary: Register a scoring function. - description: Register a scoring function. + - ToolRuntime + summary: >- + Index documents so they can be used by the RAG system. + description: >- + Index documents so they can be used by the RAG system. 
parameters: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/RegisterScoringFunctionRequest' + $ref: '#/components/schemas/InsertRequest' required: true - /v1/shields: - get: - responses: - '200': - description: A ListShieldsResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/ListShieldsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Shields - summary: List all shields. - description: List all shields. - parameters: [] + deprecated: false + /v1/tool-runtime/rag-tool/query: post: responses: '200': - description: A Shield. + description: >- + RAGQueryResult containing the retrieved content and metadata content: application/json: schema: - $ref: '#/components/schemas/Shield' + $ref: '#/components/schemas/RAGQueryResult' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -3091,16 +1628,19 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Shields - summary: Register a shield. - description: Register a shield. + - ToolRuntime + summary: >- + Query the RAG system for context; typically invoked by the agent. + description: >- + Query the RAG system for context; typically invoked by the agent. parameters: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/RegisterShieldRequest' + $ref: '#/components/schemas/QueryRequest' required: true + deprecated: false /v1/toolgroups: get: responses: @@ -3125,6 +1665,7 @@ paths: summary: List tool groups with optional provider. description: List tool groups with optional provider. parameters: [] + deprecated: false post: responses: '200': @@ -3150,6 +1691,64 @@ paths: schema: $ref: '#/components/schemas/RegisterToolGroupRequest' required: true + deprecated: false + /v1/toolgroups/{toolgroup_id}: + get: + responses: + '200': + description: A ToolGroup. + content: + application/json: + schema: + $ref: '#/components/schemas/ToolGroup' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolGroups + summary: Get a tool group by its ID. + description: Get a tool group by its ID. + parameters: + - name: toolgroup_id + in: path + description: The ID of the tool group to get. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolGroups + summary: Unregister a tool group. + description: Unregister a tool group. + parameters: + - name: toolgroup_id + in: path + description: The ID of the tool group to unregister. + required: true + schema: + type: string + deprecated: false /v1/tools: get: responses: @@ -3181,6 +1780,38 @@ paths: required: false schema: type: string + deprecated: false + /v1/tools/{tool_name}: + get: + responses: + '200': + description: A Tool. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Tool' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - ToolGroups + summary: Get a tool by its name. + description: Get a tool by its name. + parameters: + - name: tool_name + in: path + description: The name of the tool to get. + required: true + schema: + type: string + deprecated: false /v1/vector-dbs: get: responses: @@ -3205,6 +1836,7 @@ paths: summary: List all vector databases. description: List all vector databases. parameters: [] + deprecated: false post: responses: '200': @@ -3234,7 +1866,67 @@ paths: schema: $ref: '#/components/schemas/RegisterVectorDbRequest' required: true - /v1/telemetry/events: + deprecated: false + /v1/vector-dbs/{vector_db_id}: + get: + responses: + '200': + description: A VectorDB. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorDB' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorDBs + summary: Get a vector database by its identifier. + description: Get a vector database by its identifier. + parameters: + - name: vector_db_id + in: path + description: >- + The identifier of the vector database to get. + required: true + schema: + type: string + deprecated: false + delete: + responses: + '200': + description: OK + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorDBs + summary: Unregister a vector database. + description: Unregister a vector database. + parameters: + - name: vector_db_id + in: path + description: >- + The identifier of the vector database to unregister. + required: true + schema: + type: string + deprecated: false + /v1/vector-io/insert: post: responses: '200': @@ -3250,26 +1942,26 @@ paths: default: $ref: '#/components/responses/DefaultError' tags: - - Telemetry - summary: Log an event. - description: Log an event. + - VectorIO + summary: Insert chunks into a vector database. + description: Insert chunks into a vector database. parameters: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/LogEventRequest' + $ref: '#/components/schemas/InsertChunksRequest' required: true - /v1/vector_stores/{vector_store_id}/files: - get: + deprecated: false + /v1/vector-io/query: + post: responses: '200': - description: >- - A VectorStoreListFilesResponse containing the list of files. + description: A QueryChunksResponse. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreListFilesResponse' + $ref: '#/components/schemas/QueryChunksResponse' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -3282,165 +1974,16 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorIO - summary: List files in a vector store. - description: List files in a vector store. - parameters: - - name: vector_store_id - in: path - description: >- - The ID of the vector store to list files from. 
- required: true - schema: - type: string - - name: limit - in: query - description: >- - (Optional) A limit on the number of objects to be returned. Limit can - range between 1 and 100, and the default is 20. - required: false - schema: - type: integer - - name: order - in: query - description: >- - (Optional) Sort order by the `created_at` timestamp of the objects. `asc` - for ascending order and `desc` for descending order. - required: false - schema: - type: string - - name: after - in: query - description: >- - (Optional) A cursor for use in pagination. `after` is an object ID that - defines your place in the list. - required: false - schema: - type: string - - name: before - in: query - description: >- - (Optional) A cursor for use in pagination. `before` is an object ID that - defines your place in the list. - required: false - schema: - type: string - - name: filter - in: query - description: >- - (Optional) Filter by file status to only return files with the specified - status. - required: false - schema: - $ref: '#/components/schemas/VectorStoreFileStatus' - post: - responses: - '200': - description: >- - A VectorStoreFileObject representing the attached file. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Attach a file to a vector store. - description: Attach a file to a vector store. - parameters: - - name: vector_store_id - in: path - description: >- - The ID of the vector store to attach the file to. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/OpenaiAttachFileToVectorStoreRequest' - required: true - /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel: - post: - responses: - '200': - description: >- - A VectorStoreFileBatchObject representing the cancelled file batch. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Cancels a vector store file batch. - description: Cancels a vector store file batch. - parameters: - - name: batch_id - in: path - description: The ID of the file batch to cancel. - required: true - schema: - type: string - - name: vector_store_id - in: path - description: >- - The ID of the vector store containing the file batch. - required: true - schema: - type: string - /v1/completions: - post: - responses: - '200': - description: An OpenAICompletion. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAICompletion' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Inference - summary: >- - Generate an OpenAI-compatible completion for the given prompt using the specified - model. - description: >- - Generate an OpenAI-compatible completion for the given prompt using the specified - model. 
+ summary: Query chunks from a vector database. + description: Query chunks from a vector database. parameters: [] requestBody: content: application/json: schema: - $ref: '#/components/schemas/OpenaiCompletionRequest' + $ref: '#/components/schemas/QueryChunksRequest' required: true + deprecated: false /v1/vector_stores: get: responses: @@ -3498,6 +2041,7 @@ paths: required: false schema: type: string + deprecated: false post: responses: '200': @@ -3528,6 +2072,107 @@ paths: schema: $ref: '#/components/schemas/OpenaiCreateVectorStoreRequest' required: true + deprecated: false + /v1/vector_stores/{vector_store_id}: + get: + responses: + '200': + description: >- + A VectorStoreObject representing the vector store. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Retrieves a vector store. + description: Retrieves a vector store. + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store to retrieve. + required: true + schema: + type: string + deprecated: false + post: + responses: + '200': + description: >- + A VectorStoreObject representing the updated vector store. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Updates a vector store. + description: Updates a vector store. + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store to update. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/OpenaiUpdateVectorStoreRequest' + required: true + deprecated: false + delete: + responses: + '200': + description: >- + A VectorStoreDeleteResponse indicating the deletion status. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreDeleteResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Delete a vector store. + description: Delete a vector store. + parameters: + - name: vector_store_id + in: path + description: The ID of the vector store to delete. + required: true + schema: + type: string + deprecated: false /v1/vector_stores/{vector_store_id}/file_batches: post: responses: @@ -3566,81 +2211,17 @@ paths: schema: $ref: '#/components/schemas/OpenaiCreateVectorStoreFileBatchRequest' required: true - /v1/files/{file_id}: + deprecated: false + /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}: get: responses: '200': description: >- - An OpenAIFileObject containing file information. + A VectorStoreFileBatchObject representing the file batch. 
content: application/json: schema: - $ref: '#/components/schemas/OpenAIFileObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Files - summary: >- - Returns information about a specific file. - description: >- - Returns information about a specific file. - parameters: - - name: file_id - in: path - description: >- - The ID of the file to use for this request. - required: true - schema: - type: string - delete: - responses: - '200': - description: >- - An OpenAIFileDeleteResponse indicating successful deletion. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFileDeleteResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Files - summary: Delete a file. - description: Delete a file. - parameters: - - name: file_id - in: path - description: >- - The ID of the file to use for this request. - required: true - schema: - type: string - /v1/vector_stores/{vector_store_id}: - get: - responses: - '200': - description: >- - A VectorStoreObject representing the vector store. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/VectorStoreFileBatchObject' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -3653,24 +2234,33 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorIO - summary: Retrieves a vector store. - description: Retrieves a vector store. + summary: Retrieve a vector store file batch. + description: Retrieve a vector store file batch. parameters: - - name: vector_store_id + - name: batch_id in: path - description: The ID of the vector store to retrieve. + description: The ID of the file batch to retrieve. required: true schema: type: string + - name: vector_store_id + in: path + description: >- + The ID of the vector store containing the file batch. + required: true + schema: + type: string + deprecated: false + /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel: post: responses: '200': description: >- - A VectorStoreObject representing the updated vector store. + A VectorStoreFileBatchObject representing the cancelled file batch. content: application/json: schema: - $ref: '#/components/schemas/VectorStoreObject' + $ref: '#/components/schemas/VectorStoreFileBatchObject' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -3683,315 +2273,23 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorIO - summary: Updates a vector store. - description: Updates a vector store. + summary: Cancels a vector store file batch. + description: Cancels a vector store file batch. parameters: - - name: vector_store_id + - name: batch_id in: path - description: The ID of the vector store to update. + description: The ID of the file batch to cancel. required: true schema: type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/OpenaiUpdateVectorStoreRequest' - required: true - delete: - responses: - '200': - description: >- - A VectorStoreDeleteResponse indicating the deletion status. 
- content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreDeleteResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Delete a vector store. - description: Delete a vector store. - parameters: - - name: vector_store_id - in: path - description: The ID of the vector store to delete. - required: true - schema: - type: string - /v1/vector_stores/{vector_store_id}/files/{file_id}: - get: - responses: - '200': - description: >- - A VectorStoreFileObject representing the file. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Retrieves a vector store file. - description: Retrieves a vector store file. - parameters: - name: vector_store_id in: path description: >- - The ID of the vector store containing the file to retrieve. + The ID of the vector store containing the file batch. required: true schema: type: string - - name: file_id - in: path - description: The ID of the file to retrieve. - required: true - schema: - type: string - post: - responses: - '200': - description: >- - A VectorStoreFileObject representing the updated file. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Updates a vector store file. - description: Updates a vector store file. - parameters: - - name: vector_store_id - in: path - description: >- - The ID of the vector store containing the file to update. - required: true - schema: - type: string - - name: file_id - in: path - description: The ID of the file to update. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/OpenaiUpdateVectorStoreFileRequest' - required: true - delete: - responses: - '200': - description: >- - A VectorStoreFileDeleteResponse indicating the deletion status. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileDeleteResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Delete a vector store file. - description: Delete a vector store file. - parameters: - - name: vector_store_id - in: path - description: >- - The ID of the vector store containing the file to delete. - required: true - schema: - type: string - - name: file_id - in: path - description: The ID of the file to delete. - required: true - schema: - type: string - /v1/embeddings: - post: - responses: - '200': - description: >- - An OpenAIEmbeddingsResponse containing the embeddings. 
- content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIEmbeddingsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Inference - summary: >- - Generate OpenAI-compatible embeddings for the given input using the specified - model. - description: >- - Generate OpenAI-compatible embeddings for the given input using the specified - model. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/OpenaiEmbeddingsRequest' - required: true - /v1/files: - get: - responses: - '200': - description: >- - An ListOpenAIFileResponse containing the list of files. - content: - application/json: - schema: - $ref: '#/components/schemas/ListOpenAIFileResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Files - summary: >- - Returns a list of files that belong to the user's organization. - description: >- - Returns a list of files that belong to the user's organization. - parameters: - - name: after - in: query - description: >- - A cursor for use in pagination. `after` is an object ID that defines your - place in the list. For instance, if you make a list request and receive - 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo - in order to fetch the next page of the list. - required: false - schema: - type: string - - name: limit - in: query - description: >- - A limit on the number of objects to be returned. Limit can range between - 1 and 10,000, and the default is 10,000. - required: false - schema: - type: integer - - name: order - in: query - description: >- - Sort order by the `created_at` timestamp of the objects. `asc` for ascending - order and `desc` for descending order. - required: false - schema: - $ref: '#/components/schemas/Order' - - name: purpose - in: query - description: >- - Only return files with the given purpose. - required: false - schema: - $ref: '#/components/schemas/OpenAIFilePurpose' - post: - responses: - '200': - description: >- - An OpenAIFileObject representing the uploaded file. - content: - application/json: - schema: - $ref: '#/components/schemas/OpenAIFileObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Files - summary: >- - Upload a file that can be used across various endpoints. - description: >- - Upload a file that can be used across various endpoints. - - The file upload should be a multipart form request with: - - - file: The File object (not file name) to be uploaded. - - - purpose: The intended purpose of the uploaded file. - - - expires_after: Optional form values describing expiration for the file. 
- parameters: [] - requestBody: - content: - multipart/form-data: - schema: - type: object - properties: - file: - type: string - format: binary - purpose: - $ref: '#/components/schemas/OpenAIFilePurpose' - expires_after: - $ref: '#/components/schemas/ExpiresAfter' - required: - - file - - purpose - required: true + deprecated: false /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}/files: get: responses: @@ -4073,50 +2371,17 @@ paths: required: false schema: type: string - /v1/files/{file_id}/content: + deprecated: false + /v1/vector_stores/{vector_store_id}/files: get: responses: '200': description: >- - The raw file content as a binary response. + A VectorStoreListFilesResponse containing the list of files. content: application/json: schema: - $ref: '#/components/schemas/Response' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Files - summary: >- - Returns the contents of the specified file. - description: >- - Returns the contents of the specified file. - parameters: - - name: file_id - in: path - description: >- - The ID of the file to use for this request. - required: true - schema: - type: string - /v1/vector_stores/{vector_store_id}/file_batches/{batch_id}: - get: - responses: - '200': - description: >- - A VectorStoreFileBatchObject representing the file batch. - content: - application/json: - schema: - $ref: '#/components/schemas/VectorStoreFileBatchObject' + $ref: '#/components/schemas/VectorStoreListFilesResponse' '400': $ref: '#/components/responses/BadRequest400' '429': @@ -4129,22 +2394,216 @@ paths: $ref: '#/components/responses/DefaultError' tags: - VectorIO - summary: Retrieve a vector store file batch. - description: Retrieve a vector store file batch. + summary: List files in a vector store. + description: List files in a vector store. parameters: - - name: batch_id - in: path - description: The ID of the file batch to retrieve. - required: true - schema: - type: string - name: vector_store_id in: path description: >- - The ID of the vector store containing the file batch. + The ID of the vector store to list files from. required: true schema: type: string + - name: limit + in: query + description: >- + (Optional) A limit on the number of objects to be returned. Limit can + range between 1 and 100, and the default is 20. + required: false + schema: + type: integer + - name: order + in: query + description: >- + (Optional) Sort order by the `created_at` timestamp of the objects. `asc` + for ascending order and `desc` for descending order. + required: false + schema: + type: string + - name: after + in: query + description: >- + (Optional) A cursor for use in pagination. `after` is an object ID that + defines your place in the list. + required: false + schema: + type: string + - name: before + in: query + description: >- + (Optional) A cursor for use in pagination. `before` is an object ID that + defines your place in the list. + required: false + schema: + type: string + - name: filter + in: query + description: >- + (Optional) Filter by file status to only return files with the specified + status. + required: false + schema: + $ref: '#/components/schemas/VectorStoreFileStatus' + deprecated: false + post: + responses: + '200': + description: >- + A VectorStoreFileObject representing the attached file. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Attach a file to a vector store. + description: Attach a file to a vector store. + parameters: + - name: vector_store_id + in: path + description: >- + The ID of the vector store to attach the file to. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/OpenaiAttachFileToVectorStoreRequest' + required: true + deprecated: false + /v1/vector_stores/{vector_store_id}/files/{file_id}: + get: + responses: + '200': + description: >- + A VectorStoreFileObject representing the file. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Retrieves a vector store file. + description: Retrieves a vector store file. + parameters: + - name: vector_store_id + in: path + description: >- + The ID of the vector store containing the file to retrieve. + required: true + schema: + type: string + - name: file_id + in: path + description: The ID of the file to retrieve. + required: true + schema: + type: string + deprecated: false + post: + responses: + '200': + description: >- + A VectorStoreFileObject representing the updated file. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileObject' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Updates a vector store file. + description: Updates a vector store file. + parameters: + - name: vector_store_id + in: path + description: >- + The ID of the vector store containing the file to update. + required: true + schema: + type: string + - name: file_id + in: path + description: The ID of the file to update. + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/OpenaiUpdateVectorStoreFileRequest' + required: true + deprecated: false + delete: + responses: + '200': + description: >- + A VectorStoreFileDeleteResponse indicating the deletion status. + content: + application/json: + schema: + $ref: '#/components/schemas/VectorStoreFileDeleteResponse' + '400': + $ref: '#/components/responses/BadRequest400' + '429': + $ref: >- + #/components/responses/TooManyRequests429 + '500': + $ref: >- + #/components/responses/InternalServerError500 + default: + $ref: '#/components/responses/DefaultError' + tags: + - VectorIO + summary: Delete a vector store file. + description: Delete a vector store file. + parameters: + - name: vector_store_id + in: path + description: >- + The ID of the vector store containing the file to delete. + required: true + schema: + type: string + - name: file_id + in: path + description: The ID of the file to delete. 
+ required: true + schema: + type: string + deprecated: false /v1/vector_stores/{vector_store_id}/files/{file_id}/content: get: responses: @@ -4185,6 +2644,7 @@ paths: required: true schema: type: string + deprecated: false /v1/vector_stores/{vector_store_id}/search: post: responses: @@ -4226,733 +2686,7 @@ paths: schema: $ref: '#/components/schemas/OpenaiSearchVectorStoreRequest' required: true - /v1alpha/post-training/preference-optimize: - post: - responses: - '200': - description: A PostTrainingJob. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJob' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Run preference optimization of a model. - description: Run preference optimization of a model. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/PreferenceOptimizeRequest' - required: true - /v1/post-training/preference-optimize: - post: - responses: - '200': - description: A PostTrainingJob. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJob' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Run preference optimization of a model. - description: Run preference optimization of a model. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/PreferenceOptimizeRequest' - required: true - /v1/tool-runtime/rag-tool/query: - post: - responses: - '200': - description: >- - RAGQueryResult containing the retrieved content and metadata - content: - application/json: - schema: - $ref: '#/components/schemas/RAGQueryResult' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - ToolRuntime - summary: >- - Query the RAG system for context; typically invoked by the agent. - description: >- - Query the RAG system for context; typically invoked by the agent. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/QueryRequest' - required: true - /v1/vector-io/query: - post: - responses: - '200': - description: A QueryChunksResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryChunksResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - VectorIO - summary: Query chunks from a vector database. - description: Query chunks from a vector database. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/QueryChunksRequest' - required: true - /v1/telemetry/metrics/{metric_name}: - post: - responses: - '200': - description: A QueryMetricsResponse. 
- content: - application/json: - schema: - $ref: '#/components/schemas/QueryMetricsResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Query metrics. - description: Query metrics. - parameters: - - name: metric_name - in: path - description: The name of the metric to query. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/QueryMetricsRequest' - required: true - /v1/telemetry/spans: - post: - responses: - '200': - description: A QuerySpansResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/QuerySpansResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Query spans. - description: Query spans. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/QuerySpansRequest' - required: true - /v1/telemetry/traces: - post: - responses: - '200': - description: A QueryTracesResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/QueryTracesResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Query traces. - description: Query traces. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/QueryTracesRequest' - required: true - /v1alpha/inference/rerank: - post: - responses: - '200': - description: >- - RerankResponse with indices sorted by relevance score (descending). - content: - application/json: - schema: - $ref: '#/components/schemas/RerankResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Inference - summary: >- - Rerank a list of documents based on their relevance to a query. - description: >- - Rerank a list of documents based on their relevance to a query. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RerankRequest' - required: true - /v1alpha/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume: - post: - responses: - '200': - description: >- - A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk - objects. - content: - application/json: - schema: - $ref: '#/components/schemas/Turn' - text/event-stream: - schema: - $ref: '#/components/schemas/AgentTurnResponseStreamChunk' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Resume an agent turn with executed tool call responses. 
- description: >- - Resume an agent turn with executed tool call responses. - - When a Turn has the status `awaiting_input` due to pending input from client - side tool calls, this endpoint can be used to submit the outputs from the - tool calls once they are ready. - parameters: - - name: agent_id - in: path - description: The ID of the agent to resume. - required: true - schema: - type: string - - name: session_id - in: path - description: The ID of the session to resume. - required: true - schema: - type: string - - name: turn_id - in: path - description: The ID of the turn to resume. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ResumeAgentTurnRequest' - required: true - /v1/agents/{agent_id}/session/{session_id}/turn/{turn_id}/resume: - post: - responses: - '200': - description: >- - A Turn object if stream is False, otherwise an AsyncIterator of AgentTurnResponseStreamChunk - objects. - content: - application/json: - schema: - $ref: '#/components/schemas/Turn' - text/event-stream: - schema: - $ref: '#/components/schemas/AgentTurnResponseStreamChunk' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Agents - summary: >- - Resume an agent turn with executed tool call responses. - description: >- - Resume an agent turn with executed tool call responses. - - When a Turn has the status `awaiting_input` due to pending input from client - side tool calls, this endpoint can be used to submit the outputs from the - tool calls once they are ready. - parameters: - - name: agent_id - in: path - description: The ID of the agent to resume. - required: true - schema: - type: string - - name: session_id - in: path - description: The ID of the session to resume. - required: true - schema: - type: string - - name: turn_id - in: path - description: The ID of the turn to resume. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ResumeAgentTurnRequest' - required: true - /v1alpha/eval/benchmarks/{benchmark_id}/jobs: - post: - responses: - '200': - description: >- - The job that was created to run the evaluation. - content: - application/json: - schema: - $ref: '#/components/schemas/Job' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Run an evaluation on a benchmark. - description: Run an evaluation on a benchmark. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RunEvalRequest' - required: true - /v1/eval/benchmarks/{benchmark_id}/jobs: - post: - responses: - '200': - description: >- - The job that was created to run the evaluation. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Job' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Eval - summary: Run an evaluation on a benchmark. - description: Run an evaluation on a benchmark. - parameters: - - name: benchmark_id - in: path - description: >- - The ID of the benchmark to run the evaluation on. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RunEvalRequest' - required: true - /v1/moderations: - post: - responses: - '200': - description: A moderation object. - content: - application/json: - schema: - $ref: '#/components/schemas/ModerationObject' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Safety - summary: >- - Classifies if text and/or image inputs are potentially harmful. - description: >- - Classifies if text and/or image inputs are potentially harmful. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RunModerationRequest' - required: true - /v1/safety/run-shield: - post: - responses: - '200': - description: A RunShieldResponse. - content: - application/json: - schema: - $ref: '#/components/schemas/RunShieldResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Safety - summary: Run a shield. - description: Run a shield. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/RunShieldRequest' - required: true - /v1/telemetry/spans/export: - post: - responses: - '200': - description: OK - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Telemetry - summary: Save spans to a dataset. - description: Save spans to a dataset. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SaveSpansToDatasetRequest' - required: true - /v1/scoring/score: - post: - responses: - '200': - description: >- - A ScoreResponse object containing rows and aggregated results. - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Scoring - summary: Score a list of rows. - description: Score a list of rows. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreRequest' - required: true - /v1/scoring/score-batch: - post: - responses: - '200': - description: A ScoreBatchResponse. 
- content: - application/json: - schema: - $ref: '#/components/schemas/ScoreBatchResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Scoring - summary: Score a batch of rows. - description: Score a batch of rows. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/ScoreBatchRequest' - required: true - /v1/prompts/{prompt_id}/set-default-version: - post: - responses: - '200': - description: >- - The prompt with the specified version now set as default. - content: - application/json: - schema: - $ref: '#/components/schemas/Prompt' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - Prompts - summary: >- - Set which version of a prompt should be the default in get_prompt (latest). - description: >- - Set which version of a prompt should be the default in get_prompt (latest). - parameters: - - name: prompt_id - in: path - description: The identifier of the prompt. - required: true - schema: - type: string - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SetDefaultVersionRequest' - required: true - /v1alpha/post-training/supervised-fine-tune: - post: - responses: - '200': - description: A PostTrainingJob. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJob' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Run supervised fine-tuning of a model. - description: Run supervised fine-tuning of a model. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SupervisedFineTuneRequest' - required: true - /v1/post-training/supervised-fine-tune: - post: - responses: - '200': - description: A PostTrainingJob. - content: - application/json: - schema: - $ref: '#/components/schemas/PostTrainingJob' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - PostTraining (Coming Soon) - summary: Run supervised fine-tuning of a model. - description: Run supervised fine-tuning of a model. 
- parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SupervisedFineTuneRequest' - required: true - /v1/synthetic-data-generation/generate: - post: - responses: - '200': - description: >- - Response containing filtered synthetic data samples and optional statistics - content: - application/json: - schema: - $ref: '#/components/schemas/SyntheticDataGenerationResponse' - '400': - $ref: '#/components/responses/BadRequest400' - '429': - $ref: >- - #/components/responses/TooManyRequests429 - '500': - $ref: >- - #/components/responses/InternalServerError500 - default: - $ref: '#/components/responses/DefaultError' - tags: - - SyntheticDataGeneration (Coming Soon) - summary: >- - Generate synthetic data based on input dialogs and apply filtering. - description: >- - Generate synthetic data based on input dialogs and apply filtering. - parameters: [] - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/SyntheticDataGenerateRequest' - required: true + deprecated: false /v1/version: get: responses: @@ -4978,6 +2712,7 @@ paths: summary: Get the version of the service. description: Get the version of the service. parameters: [] + deprecated: false jsonSchemaDialect: >- https://json-schema.org/draft/2020-12/schema components: @@ -5010,10 +2745,603 @@ components: title: Error description: >- Error response from the API. Roughly follows RFC 7807. - AppendRowsRequest: + Order: + type: string + enum: + - asc + - desc + title: Order + description: Sort order for paginated responses. + ListOpenAIChatCompletionResponse: type: object properties: - rows: + data: + type: array + items: + type: object + properties: + id: + type: string + description: The ID of the chat completion + choices: + type: array + items: + $ref: '#/components/schemas/OpenAIChoice' + description: List of choices + object: + type: string + const: chat.completion + default: chat.completion + description: >- + The object type, which will be "chat.completion" + created: + type: integer + description: >- + The Unix timestamp in seconds when the chat completion was created + model: + type: string + description: >- + The model that was used to generate the chat completion + input_messages: + type: array + items: + $ref: '#/components/schemas/OpenAIMessageParam' + additionalProperties: false + required: + - id + - choices + - object + - created + - model + - input_messages + title: OpenAICompletionWithInputMessages + description: >- + List of chat completion objects with their input messages + has_more: + type: boolean + description: >- + Whether there are more completions available beyond this list + first_id: + type: string + description: ID of the first completion in this list + last_id: + type: string + description: ID of the last completion in this list + object: + type: string + const: list + default: list + description: >- + Must be "list" to identify this as a list response + additionalProperties: false + required: + - data + - has_more + - first_id + - last_id + - object + title: ListOpenAIChatCompletionResponse + description: >- + Response from listing OpenAI-compatible chat completions. 
+ OpenAIAssistantMessageParam: + type: object + properties: + role: + type: string + const: assistant + default: assistant + description: >- + Must be "assistant" to identify this as the model's response + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + description: The content of the model's response + name: + type: string + description: >- + (Optional) The name of the assistant message participant. + tool_calls: + type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionToolCall' + description: >- + List of tool calls. Each tool call is an OpenAIChatCompletionToolCall + object. + additionalProperties: false + required: + - role + title: OpenAIAssistantMessageParam + description: >- + A message containing the model's (assistant) response in an OpenAI-compatible + chat completion request. + "OpenAIChatCompletionContentPartImageParam": + type: object + properties: + type: + type: string + const: image_url + default: image_url + description: >- + Must be "image_url" to identify this as image content + image_url: + $ref: '#/components/schemas/OpenAIImageURL' + description: >- + Image URL specification and processing details + additionalProperties: false + required: + - type + - image_url + title: >- + OpenAIChatCompletionContentPartImageParam + description: >- + Image content part for OpenAI-compatible chat completion messages. + OpenAIChatCompletionContentPartParam: + oneOf: + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + - $ref: '#/components/schemas/OpenAIFile' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + file: '#/components/schemas/OpenAIFile' + OpenAIChatCompletionContentPartTextParam: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Must be "text" to identify this as text content + text: + type: string + description: The text content of the message + additionalProperties: false + required: + - type + - text + title: OpenAIChatCompletionContentPartTextParam + description: >- + Text content part for OpenAI-compatible chat completion messages. + OpenAIChatCompletionToolCall: + type: object + properties: + index: + type: integer + description: >- + (Optional) Index of the tool call in the list + id: + type: string + description: >- + (Optional) Unique identifier for the tool call + type: + type: string + const: function + default: function + description: >- + Must be "function" to identify this as a function call + function: + $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' + description: (Optional) Function call details + additionalProperties: false + required: + - type + title: OpenAIChatCompletionToolCall + description: >- + Tool call specification for OpenAI-compatible chat completion responses. + OpenAIChatCompletionToolCallFunction: + type: object + properties: + name: + type: string + description: (Optional) Name of the function to call + arguments: + type: string + description: >- + (Optional) Arguments to pass to the function as a JSON string + additionalProperties: false + title: OpenAIChatCompletionToolCallFunction + description: >- + Function call details for OpenAI-compatible tool calls. 
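To make the assistant/tool-call shapes above concrete, here is a small illustrative payload matching `OpenAIAssistantMessageParam`, `OpenAIChatCompletionToolCall`, and `OpenAIChatCompletionToolCallFunction`. The tool name, call id, and arguments are invented for the example; note that `arguments` is a JSON-encoded string, not a nested object.

```python
# Illustrative only: an assistant turn carrying one function tool call, shaped to
# match the OpenAIAssistantMessageParam / OpenAIChatCompletionToolCall schemas above.
import json

assistant_message = {
    "role": "assistant",          # const "assistant" in the schema
    "tool_calls": [
        {
            "index": 0,
            "id": "call_abc123",  # hypothetical call identifier
            "type": "function",   # const "function" in the schema
            "function": {
                "name": "get_weather",                      # made-up tool name
                "arguments": json.dumps({"city": "Paris"}), # JSON string, per schema
            },
        }
    ],
}
print(assistant_message["tool_calls"][0]["function"]["arguments"])
```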
+ OpenAIChoice: + type: object + properties: + message: + oneOf: + - $ref: '#/components/schemas/OpenAIUserMessageParam' + - $ref: '#/components/schemas/OpenAISystemMessageParam' + - $ref: '#/components/schemas/OpenAIAssistantMessageParam' + - $ref: '#/components/schemas/OpenAIToolMessageParam' + - $ref: '#/components/schemas/OpenAIDeveloperMessageParam' + discriminator: + propertyName: role + mapping: + user: '#/components/schemas/OpenAIUserMessageParam' + system: '#/components/schemas/OpenAISystemMessageParam' + assistant: '#/components/schemas/OpenAIAssistantMessageParam' + tool: '#/components/schemas/OpenAIToolMessageParam' + developer: '#/components/schemas/OpenAIDeveloperMessageParam' + description: The message from the model + finish_reason: + type: string + description: The reason the model stopped generating + index: + type: integer + description: The index of the choice + logprobs: + $ref: '#/components/schemas/OpenAIChoiceLogprobs' + description: >- + (Optional) The log probabilities for the tokens in the message + additionalProperties: false + required: + - message + - finish_reason + - index + title: OpenAIChoice + description: >- + A choice from an OpenAI-compatible chat completion response. + OpenAIChoiceLogprobs: + type: object + properties: + content: + type: array + items: + $ref: '#/components/schemas/OpenAITokenLogProb' + description: >- + (Optional) The log probabilities for the tokens in the message + refusal: + type: array + items: + $ref: '#/components/schemas/OpenAITokenLogProb' + description: >- + (Optional) The log probabilities for the tokens in the message + additionalProperties: false + title: OpenAIChoiceLogprobs + description: >- + The log probabilities for the tokens in the message from an OpenAI-compatible + chat completion response. + OpenAIDeveloperMessageParam: + type: object + properties: + role: + type: string + const: developer + default: developer + description: >- + Must be "developer" to identify this as a developer message + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + description: The content of the developer message + name: + type: string + description: >- + (Optional) The name of the developer message participant. + additionalProperties: false + required: + - role + - content + title: OpenAIDeveloperMessageParam + description: >- + A message from the developer in an OpenAI-compatible chat completion request. + OpenAIFile: + type: object + properties: + type: + type: string + const: file + default: file + file: + $ref: '#/components/schemas/OpenAIFileFile' + additionalProperties: false + required: + - type + - file + title: OpenAIFile + OpenAIFileFile: + type: object + properties: + file_data: + type: string + file_id: + type: string + filename: + type: string + additionalProperties: false + title: OpenAIFileFile + OpenAIImageURL: + type: object + properties: + url: + type: string + description: >- + URL of the image to include in the message + detail: + type: string + description: >- + (Optional) Level of detail for image processing. Can be "low", "high", + or "auto" + additionalProperties: false + required: + - url + title: OpenAIImageURL + description: >- + Image URL specification for OpenAI-compatible chat completion messages. 
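The content-part union above (`OpenAIChatCompletionContentPartParam`, with `OpenAIImageURL` for image parts) allows text and images to be mixed in a single user message. A tiny sketch of such a content array, with a placeholder image URL:

```python
# A mixed text + image content array, matching the content-part schemas above.
# The image URL is a placeholder for illustration.
content_parts = [
    {"type": "text", "text": "What is shown in this picture?"},
    {
        "type": "image_url",
        "image_url": {
            "url": "https://example.com/cat.png",  # placeholder URL
            "detail": "auto",                      # "low", "high", or "auto"
        },
    },
]
```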
+ OpenAIMessageParam: + oneOf: + - $ref: '#/components/schemas/OpenAIUserMessageParam' + - $ref: '#/components/schemas/OpenAISystemMessageParam' + - $ref: '#/components/schemas/OpenAIAssistantMessageParam' + - $ref: '#/components/schemas/OpenAIToolMessageParam' + - $ref: '#/components/schemas/OpenAIDeveloperMessageParam' + discriminator: + propertyName: role + mapping: + user: '#/components/schemas/OpenAIUserMessageParam' + system: '#/components/schemas/OpenAISystemMessageParam' + assistant: '#/components/schemas/OpenAIAssistantMessageParam' + tool: '#/components/schemas/OpenAIToolMessageParam' + developer: '#/components/schemas/OpenAIDeveloperMessageParam' + OpenAISystemMessageParam: + type: object + properties: + role: + type: string + const: system + default: system + description: >- + Must be "system" to identify this as a system message + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + description: >- + The content of the "system prompt". If multiple system messages are provided, + they are concatenated. The underlying Llama Stack code may also add other + system messages (for example, for formatting tool definitions). + name: + type: string + description: >- + (Optional) The name of the system message participant. + additionalProperties: false + required: + - role + - content + title: OpenAISystemMessageParam + description: >- + A system message providing instructions or context to the model. + OpenAITokenLogProb: + type: object + properties: + token: + type: string + bytes: + type: array + items: + type: integer + logprob: + type: number + top_logprobs: + type: array + items: + $ref: '#/components/schemas/OpenAITopLogProb' + additionalProperties: false + required: + - token + - logprob + - top_logprobs + title: OpenAITokenLogProb + description: >- + The log probability for a token from an OpenAI-compatible chat completion + response. + OpenAIToolMessageParam: + type: object + properties: + role: + type: string + const: tool + default: tool + description: >- + Must be "tool" to identify this as a tool response + tool_call_id: + type: string + description: >- + Unique identifier for the tool call this response is for + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + description: The response content from the tool + additionalProperties: false + required: + - role + - tool_call_id + - content + title: OpenAIToolMessageParam + description: >- + A message representing the result of a tool invocation in an OpenAI-compatible + chat completion request. + OpenAITopLogProb: + type: object + properties: + token: + type: string + bytes: + type: array + items: + type: integer + logprob: + type: number + additionalProperties: false + required: + - token + - logprob + title: OpenAITopLogProb + description: >- + The top log probability for a token from an OpenAI-compatible chat completion + response. + OpenAIUserMessageParam: + type: object + properties: + role: + type: string + const: user + default: user + description: >- + Must be "user" to identify this as a user message + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + description: >- + The content of the message, which can include text and other media + name: + type: string + description: >- + (Optional) The name of the user message participant. 
+ additionalProperties: false + required: + - role + - content + title: OpenAIUserMessageParam + description: >- + A message from the user in an OpenAI-compatible chat completion request. + OpenAIJSONSchema: + type: object + properties: + name: + type: string + description: Name of the schema + description: + type: string + description: (Optional) Description of the schema + strict: + type: boolean + description: >- + (Optional) Whether to enforce strict adherence to the schema + schema: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: (Optional) The JSON schema definition + additionalProperties: false + required: + - name + title: OpenAIJSONSchema + description: >- + JSON schema specification for OpenAI-compatible structured response format. + OpenAIResponseFormatJSONObject: + type: object + properties: + type: + type: string + const: json_object + default: json_object + description: >- + Must be "json_object" to indicate generic JSON object response format + additionalProperties: false + required: + - type + title: OpenAIResponseFormatJSONObject + description: >- + JSON object response format for OpenAI-compatible chat completion requests. + OpenAIResponseFormatJSONSchema: + type: object + properties: + type: + type: string + const: json_schema + default: json_schema + description: >- + Must be "json_schema" to indicate structured JSON response format + json_schema: + $ref: '#/components/schemas/OpenAIJSONSchema' + description: >- + The JSON schema specification for the response + additionalProperties: false + required: + - type + - json_schema + title: OpenAIResponseFormatJSONSchema + description: >- + JSON schema response format for OpenAI-compatible chat completion requests. + OpenAIResponseFormatParam: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseFormatText' + - $ref: '#/components/schemas/OpenAIResponseFormatJSONSchema' + - $ref: '#/components/schemas/OpenAIResponseFormatJSONObject' + discriminator: + propertyName: type + mapping: + text: '#/components/schemas/OpenAIResponseFormatText' + json_schema: '#/components/schemas/OpenAIResponseFormatJSONSchema' + json_object: '#/components/schemas/OpenAIResponseFormatJSONObject' + OpenAIResponseFormatText: + type: object + properties: + type: + type: string + const: text + default: text + description: >- + Must be "text" to indicate plain text response format + additionalProperties: false + required: + - type + title: OpenAIResponseFormatText + description: >- + Text response format for OpenAI-compatible chat completion requests. + OpenaiChatCompletionRequest: + type: object + properties: + model: + type: string + description: >- + The identifier of the model to use. The model must be registered with + Llama Stack and available via the /models endpoint. + messages: + type: array + items: + $ref: '#/components/schemas/OpenAIMessageParam' + description: List of messages in the conversation. + frequency_penalty: + type: number + description: >- + (Optional) The penalty for repeated tokens. + function_call: + oneOf: + - type: string + - type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: (Optional) The function call to use. + functions: type: array items: type: object @@ -5025,776 +3353,53 @@ components: - type: string - type: array - type: object - description: The rows to append to the dataset. 
- additionalProperties: false - required: - - rows - title: AppendRowsRequest - CancelTrainingJobRequest: - type: object - properties: - job_uuid: - type: string - description: The UUID of the job to cancel. - additionalProperties: false - required: - - job_uuid - title: CancelTrainingJobRequest - AgentConfig: - type: object - properties: - sampling_params: - $ref: '#/components/schemas/SamplingParams' - input_shields: - type: array - items: - type: string - output_shields: - type: array - items: - type: string - toolgroups: - type: array - items: - $ref: '#/components/schemas/AgentTool' - client_tools: - type: array - items: - $ref: '#/components/schemas/ToolDef' - tool_choice: - type: string - enum: - - auto - - required - - none - title: ToolChoice - description: >- - Whether tool use is required or automatic. This is a hint to the model - which may not be followed. It depends on the Instruction Following capabilities - of the model. - deprecated: true - tool_prompt_format: - type: string - enum: - - json - - function_tag - - python_list - title: ToolPromptFormat - description: >- - Prompt format for calling custom / zero shot tools. - deprecated: true - tool_config: - $ref: '#/components/schemas/ToolConfig' - max_infer_iters: - type: integer - default: 10 - model: - type: string - description: >- - The model identifier to use for the agent - instructions: - type: string - description: The system instructions for the agent - name: - type: string - description: >- - Optional name for the agent, used in telemetry and identification - enable_session_persistence: + description: (Optional) List of functions to use. + logit_bias: + type: object + additionalProperties: + type: number + description: (Optional) The logit bias to use. + logprobs: type: boolean - default: false + description: (Optional) The log probabilities to use. + max_completion_tokens: + type: integer description: >- - Optional flag indicating whether session data has to be persisted - response_format: - $ref: '#/components/schemas/ResponseFormat' - description: Optional response format configuration - additionalProperties: false - required: - - model - - instructions - title: AgentConfig - description: Configuration for an agent. - AgentTool: - oneOf: - - type: string - - type: object - properties: - name: - type: string - args: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - additionalProperties: false - required: - - name - - args - title: AgentToolGroupWithArgs - GrammarResponseFormat: - type: object - properties: - type: - type: string - enum: - - json_schema - - grammar - description: >- - Must be "grammar" to identify this format type - const: grammar - default: grammar - bnf: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The BNF grammar specification the response should conform to - additionalProperties: false - required: - - type - - bnf - title: GrammarResponseFormat - description: >- - Configuration for grammar-guided response generation. 
- GreedySamplingStrategy: - type: object - properties: - type: - type: string - const: greedy - default: greedy - description: >- - Must be "greedy" to identify this sampling strategy - additionalProperties: false - required: - - type - title: GreedySamplingStrategy - description: >- - Greedy sampling strategy that selects the highest probability token at each - step. - JsonSchemaResponseFormat: - type: object - properties: - type: - type: string - enum: - - json_schema - - grammar - description: >- - Must be "json_schema" to identify this format type - const: json_schema - default: json_schema - json_schema: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The JSON schema the response should conform to. In a Python SDK, this - is often a `pydantic` model. - additionalProperties: false - required: - - type - - json_schema - title: JsonSchemaResponseFormat - description: >- - Configuration for JSON schema-guided response generation. - ResponseFormat: - oneOf: - - $ref: '#/components/schemas/JsonSchemaResponseFormat' - - $ref: '#/components/schemas/GrammarResponseFormat' - discriminator: - propertyName: type - mapping: - json_schema: '#/components/schemas/JsonSchemaResponseFormat' - grammar: '#/components/schemas/GrammarResponseFormat' - SamplingParams: - type: object - properties: - strategy: - oneOf: - - $ref: '#/components/schemas/GreedySamplingStrategy' - - $ref: '#/components/schemas/TopPSamplingStrategy' - - $ref: '#/components/schemas/TopKSamplingStrategy' - discriminator: - propertyName: type - mapping: - greedy: '#/components/schemas/GreedySamplingStrategy' - top_p: '#/components/schemas/TopPSamplingStrategy' - top_k: '#/components/schemas/TopKSamplingStrategy' - description: The sampling strategy. + (Optional) The maximum number of tokens to generate. max_tokens: type: integer - default: 0 description: >- - The maximum number of tokens that can be generated in the completion. - The token count of your prompt plus max_tokens cannot exceed the model's - context length. - repetition_penalty: - type: number - default: 1.0 - description: >- - Number between -2.0 and 2.0. Positive values penalize new tokens based - on whether they appear in the text so far, increasing the model's likelihood - to talk about new topics. - stop: - type: array - items: - type: string - description: >- - Up to 4 sequences where the API will stop generating further tokens. The - returned text will not contain the stop sequence. - additionalProperties: false - required: - - strategy - title: SamplingParams - description: Sampling parameters. - ToolConfig: - type: object - properties: - tool_choice: - oneOf: - - type: string - enum: - - auto - - required - - none - title: ToolChoice - description: >- - Whether tool use is required or automatic. This is a hint to the model - which may not be followed. It depends on the Instruction Following - capabilities of the model. - - type: string - default: auto - description: >- - (Optional) Whether tool use is automatic, required, or none. Can also - specify a tool name to use a specific tool. Defaults to ToolChoice.auto. - tool_prompt_format: - type: string - enum: - - json - - function_tag - - python_list - description: >- - (Optional) Instructs the model how to format tool calls. By default, Llama - Stack will attempt to use a format that is best adapted to the model. - - `ToolPromptFormat.json`: The tool calls are formatted as a JSON object. 
- - `ToolPromptFormat.function_tag`: The tool calls are enclosed in a - tag. - `ToolPromptFormat.python_list`: The tool calls are output as Python - syntax -- a list of function calls. - system_message_behavior: - type: string - enum: - - append - - replace - description: >- - (Optional) Config for how to override the default system prompt. - `SystemMessageBehavior.append`: - Appends the provided system message to the default system prompt. - `SystemMessageBehavior.replace`: - Replaces the default system prompt with the provided system message. The - system message can include the string '{{function_definitions}}' to indicate - where the function definitions should be inserted. - default: append - additionalProperties: false - title: ToolConfig - description: Configuration for tool use. - ToolDef: - type: object - properties: - name: - type: string - description: Name of the tool - description: - type: string - description: >- - (Optional) Human-readable description of what the tool does - parameters: - type: array - items: - $ref: '#/components/schemas/ToolParameter' - description: >- - (Optional) List of parameters this tool accepts - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Additional metadata about the tool - additionalProperties: false - required: - - name - title: ToolDef - description: >- - Tool definition used in runtime contexts. - ToolParameter: - type: object - properties: - name: - type: string - description: Name of the parameter - parameter_type: - type: string - description: >- - Type of the parameter (e.g., string, integer) - description: - type: string - description: >- - Human-readable description of what the parameter does - required: - type: boolean - default: true - description: >- - Whether this parameter is required for tool invocation - items: - type: object - description: >- - Type of the elements when parameter_type is array - title: - type: string - description: (Optional) Title of the parameter - default: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Default value for the parameter if not provided - additionalProperties: false - required: - - name - - parameter_type - - description - - required - title: ToolParameter - description: Parameter definition for a tool. - TopKSamplingStrategy: - type: object - properties: - type: - type: string - const: top_k - default: top_k - description: >- - Must be "top_k" to identify this sampling strategy - top_k: + (Optional) The maximum number of tokens to generate. + n: type: integer description: >- - Number of top tokens to consider for sampling. Must be at least 1 - additionalProperties: false - required: - - type - - top_k - title: TopKSamplingStrategy - description: >- - Top-k sampling strategy that restricts sampling to the k most likely tokens. - TopPSamplingStrategy: - type: object - properties: - type: - type: string - const: top_p - default: top_p + (Optional) The number of completions to generate. + parallel_tool_calls: + type: boolean description: >- - Must be "top_p" to identify this sampling strategy - temperature: + (Optional) Whether to parallelize tool calls. + presence_penalty: type: number description: >- - Controls randomness in sampling. 
Higher values increase randomness - top_p: - type: number - default: 0.95 - description: >- - Cumulative probability threshold for nucleus sampling. Defaults to 0.95 - additionalProperties: false - required: - - type - title: TopPSamplingStrategy - description: >- - Top-p (nucleus) sampling strategy that samples from the smallest set of tokens - with cumulative probability >= p. - CreateAgentRequest: - type: object - properties: - agent_config: - $ref: '#/components/schemas/AgentConfig' - description: The configuration for the agent. - additionalProperties: false - required: - - agent_config - title: CreateAgentRequest - AgentCreateResponse: - type: object - properties: - agent_id: - type: string - description: Unique identifier for the created agent - additionalProperties: false - required: - - agent_id - title: AgentCreateResponse - description: >- - Response returned when creating a new agent. - CreateAgentSessionRequest: - type: object - properties: - session_name: - type: string - description: The name of the session to create. - additionalProperties: false - required: - - session_name - title: CreateAgentSessionRequest - AgentSessionCreateResponse: - type: object - properties: - session_id: - type: string - description: >- - Unique identifier for the created session - additionalProperties: false - required: - - session_id - title: AgentSessionCreateResponse - description: >- - Response returned when creating a new agent session. - ImageContentItem: - type: object - properties: - type: - type: string - const: image - default: image - description: >- - Discriminator type of the content item. Always "image" - image: - type: object - properties: - url: - $ref: '#/components/schemas/URL' - description: >- - A URL of the image or data URL in the format of data:image/{type};base64,{data}. - Note that URL could have length limits. - data: - type: string - contentEncoding: base64 - description: base64 encoded image data as string - additionalProperties: false - description: >- - Image as a base64 encoded string or an URL - additionalProperties: false - required: - - type - - image - title: ImageContentItem - description: A image content item - InterleavedContent: - oneOf: - - type: string - - $ref: '#/components/schemas/InterleavedContentItem' - - type: array - items: - $ref: '#/components/schemas/InterleavedContentItem' - InterleavedContentItem: - oneOf: - - $ref: '#/components/schemas/ImageContentItem' - - $ref: '#/components/schemas/TextContentItem' - discriminator: - propertyName: type - mapping: - image: '#/components/schemas/ImageContentItem' - text: '#/components/schemas/TextContentItem' - TextContentItem: - type: object - properties: - type: - type: string - const: text - default: text - description: >- - Discriminator type of the content item. Always "text" - text: - type: string - description: Text content - additionalProperties: false - required: - - type - - text - title: TextContentItem - description: A text content item - ToolResponseMessage: - type: object - properties: - role: - type: string - const: tool - default: tool - description: >- - Must be "tool" to identify this as a tool response - call_id: - type: string - description: >- - Unique identifier for the tool call this response is for - content: - $ref: '#/components/schemas/InterleavedContent' - description: The response content from the tool - additionalProperties: false - required: - - role - - call_id - - content - title: ToolResponseMessage - description: >- - A message representing the result of a tool invocation. 
- URL: - type: object - properties: - uri: - type: string - description: The URL string pointing to the resource - additionalProperties: false - required: - - uri - title: URL - description: A URL reference to external content. - UserMessage: - type: object - properties: - role: - type: string - const: user - default: user - description: >- - Must be "user" to identify this as a user message - content: - $ref: '#/components/schemas/InterleavedContent' - description: >- - The content of the message, which can include text and other media - context: - $ref: '#/components/schemas/InterleavedContent' - description: >- - (Optional) This field is used internally by Llama Stack to pass RAG context. - This field may be removed in the API in the future. - additionalProperties: false - required: - - role - - content - title: UserMessage - description: >- - A message from the user in a chat conversation. - CreateAgentTurnRequest: - type: object - properties: - messages: - type: array - items: - oneOf: - - $ref: '#/components/schemas/UserMessage' - - $ref: '#/components/schemas/ToolResponseMessage' - description: List of messages to start the turn with. + (Optional) The penalty for repeated tokens. + response_format: + $ref: '#/components/schemas/OpenAIResponseFormatParam' + description: (Optional) The response format to use. + seed: + type: integer + description: (Optional) The seed to use. + stop: + oneOf: + - type: string + - type: array + items: + type: string + description: (Optional) The stop tokens to use. stream: type: boolean description: >- - (Optional) If True, generate an SSE event stream of the response. Defaults - to False. - documents: - type: array - items: - type: object - properties: - content: - oneOf: - - type: string - - $ref: '#/components/schemas/InterleavedContentItem' - - type: array - items: - $ref: '#/components/schemas/InterleavedContentItem' - - $ref: '#/components/schemas/URL' - description: The content of the document. - mime_type: - type: string - description: The MIME type of the document. - additionalProperties: false - required: - - content - - mime_type - title: Document - description: A document to be used by an agent. - description: >- - (Optional) List of documents to create the turn with. - toolgroups: - type: array - items: - $ref: '#/components/schemas/AgentTool' - description: >- - (Optional) List of toolgroups to create the turn with, will be used in - addition to the agent's config toolgroups for the request. - tool_config: - $ref: '#/components/schemas/ToolConfig' - description: >- - (Optional) The tool configuration to create the turn with, will be used - to override the agent's tool_config. - additionalProperties: false - required: - - messages - title: CreateAgentTurnRequest - CompletionMessage: - type: object - properties: - role: - type: string - const: assistant - default: assistant - description: >- - Must be "assistant" to identify this as the model's response - content: - $ref: '#/components/schemas/InterleavedContent' - description: The content of the model's response - stop_reason: - type: string - enum: - - end_of_turn - - end_of_message - - out_of_tokens - description: >- - Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: - The model finished generating the entire response. - `StopReason.end_of_message`: - The model finished generating but generated a partial response -- usually, - a tool call. The user may call the tool and continue the conversation - with the tool's response. 
- `StopReason.out_of_tokens`: The model ran - out of token budget. - tool_calls: - type: array - items: - $ref: '#/components/schemas/ToolCall' - description: >- - List of tool calls. Each tool call is a ToolCall object. - additionalProperties: false - required: - - role - - content - - stop_reason - title: CompletionMessage - description: >- - A message containing the model's (assistant) response in a chat conversation. - InferenceStep: - type: object - properties: - turn_id: - type: string - description: The ID of the turn. - step_id: - type: string - description: The ID of the step. - started_at: - type: string - format: date-time - description: The time the step started. - completed_at: - type: string - format: date-time - description: The time the step completed. - step_type: - type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - title: StepType - description: Type of the step in an agent turn. - const: inference - default: inference - model_response: - $ref: '#/components/schemas/CompletionMessage' - description: The response from the LLM. - additionalProperties: false - required: - - turn_id - - step_id - - step_type - - model_response - title: InferenceStep - description: An inference step in an agent turn. - MemoryRetrievalStep: - type: object - properties: - turn_id: - type: string - description: The ID of the turn. - step_id: - type: string - description: The ID of the step. - started_at: - type: string - format: date-time - description: The time the step started. - completed_at: - type: string - format: date-time - description: The time the step completed. - step_type: - type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - title: StepType - description: Type of the step in an agent turn. - const: memory_retrieval - default: memory_retrieval - vector_db_ids: - type: string - description: >- - The IDs of the vector databases to retrieve context from. - inserted_context: - $ref: '#/components/schemas/InterleavedContent' - description: >- - The context retrieved from the vector databases. - additionalProperties: false - required: - - turn_id - - step_id - - step_type - - vector_db_ids - - inserted_context - title: MemoryRetrievalStep - description: >- - A memory retrieval step in an agent turn. - SafetyViolation: - type: object - properties: - violation_level: - $ref: '#/components/schemas/ViolationLevel' - description: Severity level of the violation - user_message: - type: string - description: >- - (Optional) Message to convey to the user about the violation - metadata: + (Optional) Whether to stream the response. + stream_options: type: object additionalProperties: oneOf: @@ -5804,438 +3409,683 @@ components: - type: string - type: array - type: object - description: >- - Additional metadata including specific violation codes for debugging and - telemetry - additionalProperties: false - required: - - violation_level - - metadata - title: SafetyViolation - description: >- - Details of a safety violation detected by content moderation. - ShieldCallStep: - type: object - properties: - turn_id: - type: string - description: The ID of the turn. - step_id: - type: string - description: The ID of the step. - started_at: - type: string - format: date-time - description: The time the step started. - completed_at: - type: string - format: date-time - description: The time the step completed. 
- step_type: - type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - title: StepType - description: Type of the step in an agent turn. - const: shield_call - default: shield_call - violation: - $ref: '#/components/schemas/SafetyViolation' - description: The violation from the shield call. - additionalProperties: false - required: - - turn_id - - step_id - - step_type - title: ShieldCallStep - description: A shield call step in an agent turn. - ToolCall: - type: object - properties: - call_id: - type: string - tool_name: - oneOf: - - type: string - enum: - - brave_search - - wolfram_alpha - - photogen - - code_interpreter - title: BuiltinTool - - type: string - arguments: + description: (Optional) The stream options to use. + temperature: + type: number + description: (Optional) The temperature to use. + tool_choice: oneOf: - type: string - type: object additionalProperties: oneOf: - - type: string - - type: integer - - type: number - - type: boolean - type: 'null' + - type: boolean + - type: number + - type: string - type: array - items: - oneOf: - - type: string - - type: integer - - type: number - - type: boolean - - type: 'null' - type: object - additionalProperties: - oneOf: - - type: string - - type: integer - - type: number - - type: boolean - - type: 'null' - arguments_json: - type: string - additionalProperties: false - required: - - call_id - - tool_name - - arguments - title: ToolCall - ToolExecutionStep: - type: object - properties: - turn_id: - type: string - description: The ID of the turn. - step_id: - type: string - description: The ID of the step. - started_at: - type: string - format: date-time - description: The time the step started. - completed_at: - type: string - format: date-time - description: The time the step completed. - step_type: - type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - title: StepType - description: Type of the step in an agent turn. - const: tool_execution - default: tool_execution - tool_calls: - type: array - items: - $ref: '#/components/schemas/ToolCall' - description: The tool calls to execute. - tool_responses: - type: array - items: - $ref: '#/components/schemas/ToolResponse' - description: The tool responses from the tool calls. - additionalProperties: false - required: - - turn_id - - step_id - - step_type - - tool_calls - - tool_responses - title: ToolExecutionStep - description: A tool execution step in an agent turn. - ToolResponse: - type: object - properties: - call_id: - type: string - description: >- - Unique identifier for the tool call this response is for - tool_name: - oneOf: - - type: string - enum: - - brave_search - - wolfram_alpha - - photogen - - code_interpreter - title: BuiltinTool - - type: string - description: Name of the tool that was invoked - content: - $ref: '#/components/schemas/InterleavedContent' - description: The response content from the tool - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Additional metadata about the tool response - additionalProperties: false - required: - - call_id - - tool_name - - content - title: ToolResponse - description: Response from a tool invocation. 
- Turn: - type: object - properties: - turn_id: - type: string - description: >- - Unique identifier for the turn within a session - session_id: - type: string - description: >- - Unique identifier for the conversation session - input_messages: - type: array - items: - oneOf: - - $ref: '#/components/schemas/UserMessage' - - $ref: '#/components/schemas/ToolResponseMessage' - description: >- - List of messages that initiated this turn - steps: - type: array - items: - oneOf: - - $ref: '#/components/schemas/InferenceStep' - - $ref: '#/components/schemas/ToolExecutionStep' - - $ref: '#/components/schemas/ShieldCallStep' - - $ref: '#/components/schemas/MemoryRetrievalStep' - discriminator: - propertyName: step_type - mapping: - inference: '#/components/schemas/InferenceStep' - tool_execution: '#/components/schemas/ToolExecutionStep' - shield_call: '#/components/schemas/ShieldCallStep' - memory_retrieval: '#/components/schemas/MemoryRetrievalStep' - description: >- - Ordered list of processing steps executed during this turn - output_message: - $ref: '#/components/schemas/CompletionMessage' - description: >- - The model's generated response containing content and metadata - output_attachments: + description: (Optional) The tool choice to use. + tools: type: array items: type: object - properties: - content: - oneOf: - - type: string - - $ref: '#/components/schemas/InterleavedContentItem' - - type: array - items: - $ref: '#/components/schemas/InterleavedContentItem' - - $ref: '#/components/schemas/URL' - description: The content of the attachment. - mime_type: - type: string - description: The MIME type of the attachment. - additionalProperties: false - required: - - content - - mime_type - title: Attachment - description: An attachment to an agent turn. + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: (Optional) The tools to use. + top_logprobs: + type: integer description: >- - (Optional) Files or media attached to the agent's response - started_at: + (Optional) The top log probabilities to use. + top_p: + type: number + description: (Optional) The top p to use. + user: type: string - format: date-time - description: Timestamp when the turn began - completed_at: - type: string - format: date-time - description: >- - (Optional) Timestamp when the turn finished, if completed + description: (Optional) The user to use. additionalProperties: false required: - - turn_id - - session_id - - input_messages - - steps - - output_message - - started_at - title: Turn + - model + - messages + title: OpenaiChatCompletionRequest + OpenAIChatCompletion: + type: object + properties: + id: + type: string + description: The ID of the chat completion + choices: + type: array + items: + $ref: '#/components/schemas/OpenAIChoice' + description: List of choices + object: + type: string + const: chat.completion + default: chat.completion + description: >- + The object type, which will be "chat.completion" + created: + type: integer + description: >- + The Unix timestamp in seconds when the chat completion was created + model: + type: string + description: >- + The model that was used to generate the chat completion + additionalProperties: false + required: + - id + - choices + - object + - created + - model + title: OpenAIChatCompletion description: >- - A single turn in an interaction with an Agentic System. - ViolationLevel: + Response from an OpenAI-compatible chat completion request. 
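Putting the request and response schemas together (`OpenaiChatCompletionRequest` in, `OpenAIChatCompletion` / `OpenAIChoice` out), a hedged end-to-end sketch could look like the following. The base URL, endpoint path, and model identifier are assumptions and may differ per deployment; only the payload and response field names come from the schemas in this hunk.

```python
# Hedged sketch of the OpenAI-compatible chat completion round trip, using the
# request/response shapes defined above. URL, port, and model id are assumptions.
import requests

BASE_URL = "http://localhost:8321"  # assumed Llama Stack server
payload = {
    "model": "meta-llama/Llama-3.2-3B-Instruct",  # assumed registered model id
    "messages": [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Name one prime number."},
    ],
    "temperature": 0.2,
    "stream": False,
}

resp = requests.post(f"{BASE_URL}/v1/chat/completions", json=payload)
resp.raise_for_status()
completion = resp.json()

assert completion["object"] == "chat.completion"
choice = completion["choices"][0]  # an OpenAIChoice
print(choice["finish_reason"], choice["message"]["content"])
```

When `stream` is true, the server would instead emit incremental chunks whose `choices[].delta` carries partial content, per the `OpenAIChatCompletionChunk` and `OpenAIChoiceDelta` schemas that follow.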
+ OpenAIChatCompletionChunk: + type: object + properties: + id: + type: string + description: The ID of the chat completion + choices: + type: array + items: + $ref: '#/components/schemas/OpenAIChunkChoice' + description: List of choices + object: + type: string + const: chat.completion.chunk + default: chat.completion.chunk + description: >- + The object type, which will be "chat.completion.chunk" + created: + type: integer + description: >- + The Unix timestamp in seconds when the chat completion was created + model: + type: string + description: >- + The model that was used to generate the chat completion + additionalProperties: false + required: + - id + - choices + - object + - created + - model + title: OpenAIChatCompletionChunk + description: >- + Chunk from a streaming response to an OpenAI-compatible chat completion request. + OpenAIChoiceDelta: + type: object + properties: + content: + type: string + description: (Optional) The content of the delta + refusal: + type: string + description: (Optional) The refusal of the delta + role: + type: string + description: (Optional) The role of the delta + tool_calls: + type: array + items: + $ref: '#/components/schemas/OpenAIChatCompletionToolCall' + description: (Optional) The tool calls of the delta + additionalProperties: false + title: OpenAIChoiceDelta + description: >- + A delta from an OpenAI-compatible chat completion streaming response. + OpenAIChunkChoice: + type: object + properties: + delta: + $ref: '#/components/schemas/OpenAIChoiceDelta' + description: The delta from the chunk + finish_reason: + type: string + description: The reason the model stopped generating + index: + type: integer + description: The index of the choice + logprobs: + $ref: '#/components/schemas/OpenAIChoiceLogprobs' + description: >- + (Optional) The log probabilities for the tokens in the message + additionalProperties: false + required: + - delta + - finish_reason + - index + title: OpenAIChunkChoice + description: >- + A chunk choice from an OpenAI-compatible chat completion streaming response. + OpenAICompletionWithInputMessages: + type: object + properties: + id: + type: string + description: The ID of the chat completion + choices: + type: array + items: + $ref: '#/components/schemas/OpenAIChoice' + description: List of choices + object: + type: string + const: chat.completion + default: chat.completion + description: >- + The object type, which will be "chat.completion" + created: + type: integer + description: >- + The Unix timestamp in seconds when the chat completion was created + model: + type: string + description: >- + The model that was used to generate the chat completion + input_messages: + type: array + items: + $ref: '#/components/schemas/OpenAIMessageParam' + additionalProperties: false + required: + - id + - choices + - object + - created + - model + - input_messages + title: OpenAICompletionWithInputMessages + OpenaiCompletionRequest: + type: object + properties: + model: + type: string + description: >- + The identifier of the model to use. The model must be registered with + Llama Stack and available via the /models endpoint. + prompt: + oneOf: + - type: string + - type: array + items: + type: string + - type: array + items: + type: integer + - type: array + items: + type: array + items: + type: integer + description: The prompt to generate a completion for. + best_of: + type: integer + description: >- + (Optional) The number of completions to generate. + echo: + type: boolean + description: (Optional) Whether to echo the prompt. 
+ frequency_penalty: + type: number + description: >- + (Optional) The penalty for repeated tokens. + logit_bias: + type: object + additionalProperties: + type: number + description: (Optional) The logit bias to use. + logprobs: + type: boolean + description: (Optional) The log probabilities to use. + max_tokens: + type: integer + description: >- + (Optional) The maximum number of tokens to generate. + n: + type: integer + description: >- + (Optional) The number of completions to generate. + presence_penalty: + type: number + description: >- + (Optional) The penalty for repeated tokens. + seed: + type: integer + description: (Optional) The seed to use. + stop: + oneOf: + - type: string + - type: array + items: + type: string + description: (Optional) The stop tokens to use. + stream: + type: boolean + description: >- + (Optional) Whether to stream the response. + stream_options: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: (Optional) The stream options to use. + temperature: + type: number + description: (Optional) The temperature to use. + top_p: + type: number + description: (Optional) The top p to use. + user: + type: string + description: (Optional) The user to use. + guided_choice: + type: array + items: + type: string + prompt_logprobs: + type: integer + suffix: + type: string + description: >- + (Optional) The suffix that should be appended to the completion. + additionalProperties: false + required: + - model + - prompt + title: OpenaiCompletionRequest + OpenAICompletion: + type: object + properties: + id: + type: string + choices: + type: array + items: + $ref: '#/components/schemas/OpenAICompletionChoice' + created: + type: integer + model: + type: string + object: + type: string + const: text_completion + default: text_completion + additionalProperties: false + required: + - id + - choices + - created + - model + - object + title: OpenAICompletion + description: >- + Response from an OpenAI-compatible completion request. + OpenAICompletionChoice: + type: object + properties: + finish_reason: + type: string + text: + type: string + index: + type: integer + logprobs: + $ref: '#/components/schemas/OpenAIChoiceLogprobs' + additionalProperties: false + required: + - finish_reason + - text + - index + title: OpenAICompletionChoice + description: >- + A choice from an OpenAI-compatible completion response. + OpenaiEmbeddingsRequest: + type: object + properties: + model: + type: string + description: >- + The identifier of the model to use. The model must be an embedding model + registered with Llama Stack and available via the /models endpoint. + input: + oneOf: + - type: string + - type: array + items: + type: string + description: >- + Input text to embed, encoded as a string or array of strings. To embed + multiple inputs in a single request, pass an array of strings. + encoding_format: + type: string + description: >- + (Optional) The format to return the embeddings in. Can be either "float" + or "base64". Defaults to "float". + dimensions: + type: integer + description: >- + (Optional) The number of dimensions the resulting output embeddings should + have. Only supported in text-embedding-3 and later models. + user: + type: string + description: >- + (Optional) A unique identifier representing your end-user, which can help + OpenAI to monitor and detect abuse. 
+ additionalProperties: false + required: + - model + - input + title: OpenaiEmbeddingsRequest + OpenAIEmbeddingData: + type: object + properties: + object: + type: string + const: embedding + default: embedding + description: >- + The object type, which will be "embedding" + embedding: + oneOf: + - type: array + items: + type: number + - type: string + description: >- + The embedding vector as a list of floats (when encoding_format="float") + or as a base64-encoded string (when encoding_format="base64") + index: + type: integer + description: >- + The index of the embedding in the input list + additionalProperties: false + required: + - object + - embedding + - index + title: OpenAIEmbeddingData + description: >- + A single embedding data object from an OpenAI-compatible embeddings response. + OpenAIEmbeddingUsage: + type: object + properties: + prompt_tokens: + type: integer + description: The number of tokens in the input + total_tokens: + type: integer + description: The total number of tokens used + additionalProperties: false + required: + - prompt_tokens + - total_tokens + title: OpenAIEmbeddingUsage + description: >- + Usage information for an OpenAI-compatible embeddings response. + OpenAIEmbeddingsResponse: + type: object + properties: + object: + type: string + const: list + default: list + description: The object type, which will be "list" + data: + type: array + items: + $ref: '#/components/schemas/OpenAIEmbeddingData' + description: List of embedding data objects + model: + type: string + description: >- + The model that was used to generate the embeddings + usage: + $ref: '#/components/schemas/OpenAIEmbeddingUsage' + description: Usage information + additionalProperties: false + required: + - object + - data + - model + - usage + title: OpenAIEmbeddingsResponse + description: >- + Response from an OpenAI-compatible embeddings request. + OpenAIFilePurpose: type: string enum: - - info - - warn - - error - title: ViolationLevel - description: Severity level of a safety violation. - AgentTurnResponseEvent: + - assistants + - batch + title: OpenAIFilePurpose + description: >- + Valid purpose values for OpenAI Files API. 
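The embeddings schemas above (`OpenaiEmbeddingsRequest`, `OpenAIEmbeddingsResponse`, `OpenAIEmbeddingData`, `OpenAIEmbeddingUsage`) suggest the following sketch; the endpoint path and embedding model id are assumptions, while the request and response field names come from the spec.

```python
# Sketch of an OpenAI-compatible embeddings call shaped by the schemas above.
# Endpoint path and model id are assumptions for illustration.
import requests

BASE_URL = "http://localhost:8321"  # assumed Llama Stack server
req = {
    "model": "all-MiniLM-L6-v2",                    # assumed embedding model id
    "input": ["first document", "second document"],
    "encoding_format": "float",                     # "float" or "base64"
}

resp = requests.post(f"{BASE_URL}/v1/embeddings", json=req).json()

assert resp["object"] == "list"
for item in resp["data"]:                           # OpenAIEmbeddingData entries
    print(item["index"], len(item["embedding"]))
print("prompt tokens:", resp["usage"]["prompt_tokens"])
```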
+ ListOpenAIFileResponse: type: object properties: - payload: - oneOf: - - $ref: '#/components/schemas/AgentTurnResponseStepStartPayload' - - $ref: '#/components/schemas/AgentTurnResponseStepProgressPayload' - - $ref: '#/components/schemas/AgentTurnResponseStepCompletePayload' - - $ref: '#/components/schemas/AgentTurnResponseTurnStartPayload' - - $ref: '#/components/schemas/AgentTurnResponseTurnCompletePayload' - - $ref: '#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload' - discriminator: - propertyName: event_type - mapping: - step_start: '#/components/schemas/AgentTurnResponseStepStartPayload' - step_progress: '#/components/schemas/AgentTurnResponseStepProgressPayload' - step_complete: '#/components/schemas/AgentTurnResponseStepCompletePayload' - turn_start: '#/components/schemas/AgentTurnResponseTurnStartPayload' - turn_complete: '#/components/schemas/AgentTurnResponseTurnCompletePayload' - turn_awaiting_input: '#/components/schemas/AgentTurnResponseTurnAwaitingInputPayload' + data: + type: array + items: + $ref: '#/components/schemas/OpenAIFileObject' + description: List of file objects + has_more: + type: boolean description: >- - Event-specific payload containing event data + Whether there are more files available beyond this page + first_id: + type: string + description: >- + ID of the first file in the list for pagination + last_id: + type: string + description: >- + ID of the last file in the list for pagination + object: + type: string + const: list + default: list + description: The object type, which is always "list" additionalProperties: false required: - - payload - title: AgentTurnResponseEvent + - data + - has_more + - first_id + - last_id + - object + title: ListOpenAIFileResponse description: >- - An event in an agent turn response stream. - AgentTurnResponseStepCompletePayload: + Response for listing files in OpenAI Files API. 
+ OpenAIFileObject: type: object properties: - event_type: + object: type: string - enum: - - step_start - - step_complete - - step_progress - - turn_start - - turn_complete - - turn_awaiting_input - const: step_complete - default: step_complete - description: Type of event being reported - step_type: - type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - description: Type of step being executed - step_id: + const: file + default: file + description: The object type, which is always "file" + id: type: string description: >- - Unique identifier for the step within a turn - step_details: - oneOf: - - $ref: '#/components/schemas/InferenceStep' - - $ref: '#/components/schemas/ToolExecutionStep' - - $ref: '#/components/schemas/ShieldCallStep' - - $ref: '#/components/schemas/MemoryRetrievalStep' - discriminator: - propertyName: step_type - mapping: - inference: '#/components/schemas/InferenceStep' - tool_execution: '#/components/schemas/ToolExecutionStep' - shield_call: '#/components/schemas/ShieldCallStep' - memory_retrieval: '#/components/schemas/MemoryRetrievalStep' - description: Complete details of the executed step + The file identifier, which can be referenced in the API endpoints + bytes: + type: integer + description: The size of the file, in bytes + created_at: + type: integer + description: >- + The Unix timestamp (in seconds) for when the file was created + expires_at: + type: integer + description: >- + The Unix timestamp (in seconds) for when the file expires + filename: + type: string + description: The name of the file + purpose: + type: string + enum: + - assistants + - batch + description: The intended purpose of the file additionalProperties: false required: - - event_type - - step_type - - step_id - - step_details - title: AgentTurnResponseStepCompletePayload + - object + - id + - bytes + - created_at + - expires_at + - filename + - purpose + title: OpenAIFileObject description: >- - Payload for step completion events in agent turn responses. - AgentTurnResponseStepProgressPayload: + OpenAI File object as defined in the OpenAI Files API. + ExpiresAfter: type: object properties: - event_type: + anchor: type: string - enum: - - step_start - - step_complete - - step_progress - - turn_start - - turn_complete - - turn_awaiting_input - const: step_progress - default: step_progress - description: Type of event being reported - step_type: - type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - description: Type of step being executed - step_id: - type: string - description: >- - Unique identifier for the step within a turn - delta: - oneOf: - - $ref: '#/components/schemas/TextDelta' - - $ref: '#/components/schemas/ImageDelta' - - $ref: '#/components/schemas/ToolCallDelta' - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/TextDelta' - image: '#/components/schemas/ImageDelta' - tool_call: '#/components/schemas/ToolCallDelta' - description: >- - Incremental content changes during step execution + const: created_at + seconds: + type: integer additionalProperties: false required: - - event_type - - step_type - - step_id - - delta - title: AgentTurnResponseStepProgressPayload + - anchor + - seconds + title: ExpiresAfter description: >- - Payload for step progress events in agent turn responses. - AgentTurnResponseStepStartPayload: + Control expiration of uploaded files. 
+ + Params: + - anchor, must be "created_at" + - seconds, must be int between 3600 and 2592000 (1 hour to 30 days) + OpenAIFileDeleteResponse: type: object properties: - event_type: + id: + type: string + description: The file identifier that was deleted + object: + type: string + const: file + default: file + description: The object type, which is always "file" + deleted: + type: boolean + description: >- + Whether the file was successfully deleted + additionalProperties: false + required: + - id + - object + - deleted + title: OpenAIFileDeleteResponse + description: >- + Response for deleting a file in OpenAI Files API. + Response: + type: object + title: Response + HealthInfo: + type: object + properties: + status: type: string enum: - - step_start - - step_complete - - step_progress - - turn_start - - turn_complete - - turn_awaiting_input - const: step_start - default: step_start - description: Type of event being reported - step_type: + - OK + - Error + - Not Implemented + description: Current health status of the service + additionalProperties: false + required: + - status + title: HealthInfo + description: >- + Health status information for the service. + RouteInfo: + type: object + properties: + route: type: string - enum: - - inference - - tool_execution - - shield_call - - memory_retrieval - description: Type of step being executed - step_id: + description: The API endpoint path + method: + type: string + description: HTTP method for the route + provider_types: + type: array + items: + type: string + description: >- + List of provider types that implement this route + additionalProperties: false + required: + - route + - method + - provider_types + title: RouteInfo + description: >- + Information about an API route including its path, method, and implementing + providers. + ListRoutesResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/RouteInfo' + description: >- + List of available route information objects + additionalProperties: false + required: + - data + title: ListRoutesResponse + description: >- + Response containing a list of all available API routes. + Model: + type: object + properties: + identifier: type: string description: >- - Unique identifier for the step within a turn + Unique identifier for this resource in llama stack + provider_resource_id: + type: string + description: >- + Unique identifier for this resource in the provider + provider_id: + type: string + description: >- + ID of the provider that owns this resource + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: model + default: model + description: >- + The resource type, always 'model' for model resources metadata: type: object additionalProperties: @@ -6246,177 +4096,360 @@ components: - type: string - type: array - type: object + description: Any additional metadata for this model + model_type: + $ref: '#/components/schemas/ModelType' + default: llm description: >- - (Optional) Additional metadata for the step - additionalProperties: false - required: - - event_type - - step_type - - step_id - title: AgentTurnResponseStepStartPayload - description: >- - Payload for step start events in agent turn responses. 
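For the service-introspection schemas added in this hunk (`HealthInfo`, `RouteInfo`, `ListRoutesResponse`), a quick sketch of reading them is shown below; the `/v1/health` and `/v1/inspect/routes` paths are assumptions for illustration, and only the response field names are taken from the schemas.

```python
# Liveness / route-introspection sketch built around the HealthInfo and
# ListRoutesResponse shapes above. The paths are assumptions for illustration.
import requests

BASE_URL = "http://localhost:8321"  # assumed Llama Stack server

health = requests.get(f"{BASE_URL}/v1/health").json()
print("status:", health["status"])  # "OK", "Error", or "Not Implemented"

routes = requests.get(f"{BASE_URL}/v1/inspect/routes").json()
for r in routes["data"]:             # RouteInfo entries
    print(r["method"], r["route"], ",".join(r["provider_types"]))
```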
- AgentTurnResponseStreamChunk: - type: object - properties: - event: - $ref: '#/components/schemas/AgentTurnResponseEvent' - description: >- - Individual event in the agent turn response stream - additionalProperties: false - required: - - event - title: AgentTurnResponseStreamChunk - description: Streamed agent turn completion response. - "AgentTurnResponseTurnAwaitingInputPayload": - type: object - properties: - event_type: - type: string - enum: - - step_start - - step_complete - - step_progress - - turn_start - - turn_complete - - turn_awaiting_input - const: turn_awaiting_input - default: turn_awaiting_input - description: Type of event being reported - turn: - $ref: '#/components/schemas/Turn' - description: >- - Turn data when waiting for external tool responses - additionalProperties: false - required: - - event_type - - turn - title: >- - AgentTurnResponseTurnAwaitingInputPayload - description: >- - Payload for turn awaiting input events in agent turn responses. - AgentTurnResponseTurnCompletePayload: - type: object - properties: - event_type: - type: string - enum: - - step_start - - step_complete - - step_progress - - turn_start - - turn_complete - - turn_awaiting_input - const: turn_complete - default: turn_complete - description: Type of event being reported - turn: - $ref: '#/components/schemas/Turn' - description: >- - Complete turn data including all steps and results - additionalProperties: false - required: - - event_type - - turn - title: AgentTurnResponseTurnCompletePayload - description: >- - Payload for turn completion events in agent turn responses. - AgentTurnResponseTurnStartPayload: - type: object - properties: - event_type: - type: string - enum: - - step_start - - step_complete - - step_progress - - turn_start - - turn_complete - - turn_awaiting_input - const: turn_start - default: turn_start - description: Type of event being reported - turn_id: - type: string - description: >- - Unique identifier for the turn within a session - additionalProperties: false - required: - - event_type - - turn_id - title: AgentTurnResponseTurnStartPayload - description: >- - Payload for turn start events in agent turn responses. - ImageDelta: - type: object - properties: - type: - type: string - const: image - default: image - description: >- - Discriminator type of the delta. Always "image" - image: - type: string - contentEncoding: base64 - description: The incremental image data as bytes + The type of model (LLM or embedding model) additionalProperties: false required: + - identifier + - provider_id - type - - image - title: ImageDelta + - metadata + - model_type + title: Model description: >- - An image content delta for streaming responses. - TextDelta: + A model resource representing an AI model registered in Llama Stack. + ModelType: + type: string + enum: + - llm + - embedding + title: ModelType + description: >- + Enumeration of supported model types in Llama Stack. + ListModelsResponse: type: object properties: - type: - type: string - const: text - default: text - description: >- - Discriminator type of the delta. Always "text" - text: - type: string - description: The incremental text content + data: + type: array + items: + $ref: '#/components/schemas/Model' additionalProperties: false required: - - type - - text - title: TextDelta - description: >- - A text content delta for streaming responses. 
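The Model resource defined above can be illustrated with a short sketch; the identifier and provider values below are placeholders rather than entries from any actual distribution.

```yaml
# Hypothetical Model resource registered in Llama Stack (values illustrative)
identifier: meta-llama/Llama-3.1-8B-Instruct
provider_id: ollama
provider_resource_id: llama3.1:8b-instruct-fp16
type: model        # always "model" for model resources
model_type: llm    # one of: llm, embedding
metadata: {}
```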
- ToolCallDelta: + - data + title: ListModelsResponse + RegisterModelRequest: type: object properties: - type: + model_id: + type: string + description: The identifier of the model to register. + provider_model_id: type: string - const: tool_call - default: tool_call description: >- - Discriminator type of the delta. Always "tool_call" - tool_call: + The identifier of the model in the provider. + provider_id: + type: string + description: The identifier of the provider. + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Any additional metadata for this model. + model_type: + $ref: '#/components/schemas/ModelType' + description: The type of model to register. + additionalProperties: false + required: + - model_id + title: RegisterModelRequest + RunModerationRequest: + type: object + properties: + input: oneOf: - type: string - - $ref: '#/components/schemas/ToolCall' + - type: array + items: + type: string description: >- - Either an in-progress tool call string or the final parsed tool call - parse_status: + Input (or inputs) to classify. Can be a single string, an array of strings, + or an array of multi-modal input objects similar to other models. + model: type: string - enum: - - started - - in_progress - - failed - - succeeded - description: Current parsing status of the tool call + description: >- + The content moderation model you would like to use. additionalProperties: false required: - - type - - tool_call - - parse_status - title: ToolCallDelta + - input + - model + title: RunModerationRequest + ModerationObject: + type: object + properties: + id: + type: string + description: >- + The unique identifier for the moderation request. + model: + type: string + description: >- + The model used to generate the moderation results. + results: + type: array + items: + $ref: '#/components/schemas/ModerationObjectResults' + description: A list of moderation objects + additionalProperties: false + required: + - id + - model + - results + title: ModerationObject + description: A moderation object. + ModerationObjectResults: + type: object + properties: + flagged: + type: boolean + description: >- + Whether any of the below categories are flagged. + categories: + type: object + additionalProperties: + type: boolean + description: >- + A list of the categories, and whether they are flagged or not. + category_applied_input_types: + type: object + additionalProperties: + type: array + items: + type: string + description: >- + A list of the categories along with the input type(s) that the score applies + to. + category_scores: + type: object + additionalProperties: + type: number + description: >- + A list of the categories along with their scores as predicted by model. + user_message: + type: string + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + additionalProperties: false + required: + - flagged + - metadata + title: ModerationObjectResults + description: A moderation object. + Prompt: + type: object + properties: + prompt: + type: string + description: >- + The system prompt text with variable placeholders. Variables are only + supported when using the Responses API. 
+ version: + type: integer + description: >- + Version (integer starting at 1, incremented on save) + prompt_id: + type: string + description: >- + Unique identifier formatted as 'pmpt_<48-digit-hash>' + variables: + type: array + items: + type: string + description: >- + List of prompt variable names that can be used in the prompt template + is_default: + type: boolean + default: false + description: >- + Boolean indicating whether this version is the default version for this + prompt + additionalProperties: false + required: + - version + - prompt_id + - variables + - is_default + title: Prompt description: >- - A tool call content delta for streaming responses. + A prompt resource representing a stored OpenAI Compatible prompt template + in Llama Stack. + ListPromptsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Prompt' + additionalProperties: false + required: + - data + title: ListPromptsResponse + description: Response model to list prompts. + CreatePromptRequest: + type: object + properties: + prompt: + type: string + description: >- + The prompt text content with variable placeholders. + variables: + type: array + items: + type: string + description: >- + List of variable names that can be used in the prompt template. + additionalProperties: false + required: + - prompt + title: CreatePromptRequest + UpdatePromptRequest: + type: object + properties: + prompt: + type: string + description: The updated prompt text content. + version: + type: integer + description: >- + The current version of the prompt being updated. + variables: + type: array + items: + type: string + description: >- + Updated list of variable names that can be used in the prompt template. + set_as_default: + type: boolean + description: >- + Set the new version as the default (default=True). + additionalProperties: false + required: + - prompt + - version + - set_as_default + title: UpdatePromptRequest + SetDefaultVersionRequest: + type: object + properties: + version: + type: integer + description: The version to set as default. + additionalProperties: false + required: + - version + title: SetDefaultVersionRequest + ProviderInfo: + type: object + properties: + api: + type: string + description: The API name this provider implements + provider_id: + type: string + description: Unique identifier for the provider + provider_type: + type: string + description: The type of provider implementation + config: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Configuration parameters for the provider + health: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Current health status of the provider + additionalProperties: false + required: + - api + - provider_id + - provider_type + - config + - health + title: ProviderInfo + description: >- + Information about a registered provider including its configuration and health + status. + ListProvidersResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/ProviderInfo' + description: List of provider information objects + additionalProperties: false + required: + - data + title: ListProvidersResponse + description: >- + Response containing a list of all available providers. 
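A hypothetical Prompt resource conforming to the schema above might look like the following. The `{{variable}}` placeholder syntax and the shortened `prompt_id` are assumptions made for readability; the excerpt only states that the identifier is `pmpt_` followed by a 48-digit hash.

```yaml
# Hypothetical stored Prompt resource (prompt_id abbreviated for illustration)
prompt_id: pmpt_123...abc
prompt: "Summarize the following ticket for {{audience}}: {{ticket_text}}"
version: 2
variables:
  - audience
  - ticket_text
is_default: true
```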
+ ListOpenAIResponseObject: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseObjectWithInput' + description: >- + List of response objects with their input context + has_more: + type: boolean + description: >- + Whether there are more results available beyond this page + first_id: + type: string + description: >- + Identifier of the first item in this page + last_id: + type: string + description: Identifier of the last item in this page + object: + type: string + const: list + default: list + description: Object type identifier, always "list" + additionalProperties: false + required: + - data + - has_more + - first_id + - last_id + - object + title: ListOpenAIResponseObject + description: >- + Paginated list of OpenAI response objects with navigation metadata. OpenAIResponseAnnotationCitation: type: object properties: @@ -6535,6 +4568,24 @@ components: url_citation: '#/components/schemas/OpenAIResponseAnnotationCitation' container_file_citation: '#/components/schemas/OpenAIResponseAnnotationContainerFileCitation' file_path: '#/components/schemas/OpenAIResponseAnnotationFilePath' + OpenAIResponseError: + type: object + properties: + code: + type: string + description: >- + Error code identifying the type of failure + message: + type: string + description: >- + Human-readable error message describing the failure + additionalProperties: false + required: + - code + - message + title: OpenAIResponseError + description: >- + Error details for failed OpenAI response requests. OpenAIResponseInput: oneOf: - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' @@ -6627,6 +4678,499 @@ components: title: OpenAIResponseInputMessageContentText description: >- Text content for input messages in OpenAI response format. + OpenAIResponseMCPApprovalRequest: + type: object + properties: + arguments: + type: string + id: + type: string + name: + type: string + server_label: + type: string + type: + type: string + const: mcp_approval_request + default: mcp_approval_request + additionalProperties: false + required: + - arguments + - id + - name + - server_label + - type + title: OpenAIResponseMCPApprovalRequest + description: >- + A request for human approval of a tool invocation. + OpenAIResponseMCPApprovalResponse: + type: object + properties: + approval_request_id: + type: string + approve: + type: boolean + type: + type: string + const: mcp_approval_response + default: mcp_approval_response + id: + type: string + reason: + type: string + additionalProperties: false + required: + - approval_request_id + - approve + - type + title: OpenAIResponseMCPApprovalResponse + description: A response to an MCP approval request. + OpenAIResponseMessage: + type: object + properties: + content: + oneOf: + - type: string + - type: array + items: + $ref: '#/components/schemas/OpenAIResponseInputMessageContent' + - type: array + items: + $ref: '#/components/schemas/OpenAIResponseOutputMessageContent' + role: + oneOf: + - type: string + const: system + - type: string + const: developer + - type: string + const: user + - type: string + const: assistant + type: + type: string + const: message + default: message + id: + type: string + status: + type: string + additionalProperties: false + required: + - content + - role + - type + title: OpenAIResponseMessage + description: >- + Corresponds to the various Message types in the Responses API. 
They are all + under one type because the Responses API gives them all the same "type" value, + and there is no way to tell them apart in certain scenarios. + OpenAIResponseObjectWithInput: + type: object + properties: + created_at: + type: integer + description: >- + Unix timestamp when the response was created + error: + $ref: '#/components/schemas/OpenAIResponseError' + description: >- + (Optional) Error details if the response generation failed + id: + type: string + description: Unique identifier for this response + model: + type: string + description: Model identifier used for generation + object: + type: string + const: response + default: response + description: >- + Object type identifier, always "response" + output: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseOutput' + description: >- + List of generated output items (messages, tool calls, etc.) + parallel_tool_calls: + type: boolean + default: false + description: >- + Whether tool calls can be executed in parallel + previous_response_id: + type: string + description: >- + (Optional) ID of the previous response in a conversation + status: + type: string + description: >- + Current status of the response generation + temperature: + type: number + description: >- + (Optional) Sampling temperature used for generation + text: + $ref: '#/components/schemas/OpenAIResponseText' + description: >- + Text formatting configuration for the response + top_p: + type: number + description: >- + (Optional) Nucleus sampling parameter used for generation + truncation: + type: string + description: >- + (Optional) Truncation strategy applied to the response + input: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseInput' + description: >- + List of input items that led to this response + additionalProperties: false + required: + - created_at + - id + - model + - object + - output + - parallel_tool_calls + - status + - text + - input + title: OpenAIResponseObjectWithInput + description: >- + OpenAI response object extended with input context information. 
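To show how the paginated wrapper and OpenAIResponseObjectWithInput fit together, here is a minimal hypothetical page containing one completed response. Optional fields are omitted, the `status` value is illustrative, and a plain message is assumed to be an accepted OpenAIResponseInput variant.

```yaml
# Hypothetical ListOpenAIResponseObject page (IDs and model name invented)
data:
  - object: response
    id: resp_001
    created_at: 1730000000
    model: example-model
    status: completed
    parallel_tool_calls: false
    text:
      format:
        type: text
    output:
      - type: message
        role: assistant
        content: "Hello! How can I help?"
    input:
      - type: message
        role: user
        content: "Say hello."
has_more: false
first_id: resp_001
last_id: resp_001
object: list
```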
+ OpenAIResponseOutput: + oneOf: + - $ref: '#/components/schemas/OpenAIResponseMessage' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' + - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' + - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest' + discriminator: + propertyName: type + mapping: + message: '#/components/schemas/OpenAIResponseMessage' + web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' + file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' + function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' + mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' + mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' + mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest' + OpenAIResponseOutputMessageContent: + type: object + properties: + text: + type: string + type: + type: string + const: output_text + default: output_text + annotations: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseAnnotations' + additionalProperties: false + required: + - text + - type + - annotations + title: >- + OpenAIResponseOutputMessageContentOutputText + "OpenAIResponseOutputMessageFileSearchToolCall": + type: object + properties: + id: + type: string + description: Unique identifier for this tool call + queries: + type: array + items: + type: string + description: List of search queries executed + status: + type: string + description: >- + Current status of the file search operation + type: + type: string + const: file_search_call + default: file_search_call + description: >- + Tool call type identifier, always "file_search_call" + results: + type: array + items: + type: object + properties: + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Key-value attributes associated with the file + file_id: + type: string + description: >- + Unique identifier of the file containing the result + filename: + type: string + description: Name of the file containing the result + score: + type: number + description: >- + Relevance score for this search result (between 0 and 1) + text: + type: string + description: Text content of the search result + additionalProperties: false + required: + - attributes + - file_id + - filename + - score + - text + title: >- + OpenAIResponseOutputMessageFileSearchToolCallResults + description: >- + Search results returned by the file search operation. + description: >- + (Optional) Search results returned by the file search operation + additionalProperties: false + required: + - id + - queries + - status + - type + title: >- + OpenAIResponseOutputMessageFileSearchToolCall + description: >- + File search tool call output message for OpenAI responses. 
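A hypothetical `file_search_call` output item conforming to the schema above, with invented file IDs, query text, and scores:

```yaml
# Hypothetical OpenAIResponseOutputMessageFileSearchToolCall item
type: file_search_call
id: fs_call_001
status: completed
queries:
  - "vector database configuration"
results:
  - file_id: file-abc123
    filename: docs.md
    score: 0.87
    text: "To configure the vector database ..."
    attributes: {}
```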
+ "OpenAIResponseOutputMessageFunctionToolCall": + type: object + properties: + call_id: + type: string + description: Unique identifier for the function call + name: + type: string + description: Name of the function being called + arguments: + type: string + description: >- + JSON string containing the function arguments + type: + type: string + const: function_call + default: function_call + description: >- + Tool call type identifier, always "function_call" + id: + type: string + description: >- + (Optional) Additional identifier for the tool call + status: + type: string + description: >- + (Optional) Current status of the function call execution + additionalProperties: false + required: + - call_id + - name + - arguments + - type + title: >- + OpenAIResponseOutputMessageFunctionToolCall + description: >- + Function tool call output message for OpenAI responses. + OpenAIResponseOutputMessageMCPCall: + type: object + properties: + id: + type: string + description: Unique identifier for this MCP call + type: + type: string + const: mcp_call + default: mcp_call + description: >- + Tool call type identifier, always "mcp_call" + arguments: + type: string + description: >- + JSON string containing the MCP call arguments + name: + type: string + description: Name of the MCP method being called + server_label: + type: string + description: >- + Label identifying the MCP server handling the call + error: + type: string + description: >- + (Optional) Error message if the MCP call failed + output: + type: string + description: >- + (Optional) Output result from the successful MCP call + additionalProperties: false + required: + - id + - type + - arguments + - name + - server_label + title: OpenAIResponseOutputMessageMCPCall + description: >- + Model Context Protocol (MCP) call output message for OpenAI responses. + OpenAIResponseOutputMessageMCPListTools: + type: object + properties: + id: + type: string + description: >- + Unique identifier for this MCP list tools operation + type: + type: string + const: mcp_list_tools + default: mcp_list_tools + description: >- + Tool call type identifier, always "mcp_list_tools" + server_label: + type: string + description: >- + Label identifying the MCP server providing the tools + tools: + type: array + items: + type: object + properties: + input_schema: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + JSON schema defining the tool's input parameters + name: + type: string + description: Name of the tool + description: + type: string + description: >- + (Optional) Description of what the tool does + additionalProperties: false + required: + - input_schema + - name + title: MCPListToolsTool + description: >- + Tool definition returned by MCP list tools operation. + description: >- + List of available tools provided by the MCP server + additionalProperties: false + required: + - id + - type + - server_label + - tools + title: OpenAIResponseOutputMessageMCPListTools + description: >- + MCP list tools output message containing available tools from an MCP server. 
+ "OpenAIResponseOutputMessageWebSearchToolCall": + type: object + properties: + id: + type: string + description: Unique identifier for this tool call + status: + type: string + description: >- + Current status of the web search operation + type: + type: string + const: web_search_call + default: web_search_call + description: >- + Tool call type identifier, always "web_search_call" + additionalProperties: false + required: + - id + - status + - type + title: >- + OpenAIResponseOutputMessageWebSearchToolCall + description: >- + Web search tool call output message for OpenAI responses. + OpenAIResponseText: + type: object + properties: + format: + type: object + properties: + type: + oneOf: + - type: string + const: text + - type: string + const: json_schema + - type: string + const: json_object + description: >- + Must be "text", "json_schema", or "json_object" to identify the format + type + name: + type: string + description: >- + The name of the response format. Only used for json_schema. + schema: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The JSON schema the response should conform to. In a Python SDK, this + is often a `pydantic` model. Only used for json_schema. + description: + type: string + description: >- + (Optional) A description of the response format. Only used for json_schema. + strict: + type: boolean + description: >- + (Optional) Whether to strictly enforce the JSON schema. If true, the + response must match the schema exactly. Only used for json_schema. + additionalProperties: false + required: + - type + description: >- + (Optional) Text format configuration specifying output format requirements + additionalProperties: false + title: OpenAIResponseText + description: >- + Text response configuration for OpenAI responses. OpenAIResponseInputTool: oneOf: - $ref: '#/components/schemas/OpenAIResponseInputToolWebSearch' @@ -6837,302 +5381,6 @@ components: title: OpenAIResponseInputToolWebSearch description: >- Web search tool configuration for OpenAI response inputs. - OpenAIResponseMCPApprovalRequest: - type: object - properties: - arguments: - type: string - id: - type: string - name: - type: string - server_label: - type: string - type: - type: string - const: mcp_approval_request - default: mcp_approval_request - additionalProperties: false - required: - - arguments - - id - - name - - server_label - - type - title: OpenAIResponseMCPApprovalRequest - description: >- - A request for human approval of a tool invocation. - OpenAIResponseMCPApprovalResponse: - type: object - properties: - approval_request_id: - type: string - approve: - type: boolean - type: - type: string - const: mcp_approval_response - default: mcp_approval_response - id: - type: string - reason: - type: string - additionalProperties: false - required: - - approval_request_id - - approve - - type - title: OpenAIResponseMCPApprovalResponse - description: A response to an MCP approval request. 
- OpenAIResponseMessage: - type: object - properties: - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIResponseInputMessageContent' - - type: array - items: - $ref: '#/components/schemas/OpenAIResponseOutputMessageContent' - role: - oneOf: - - type: string - const: system - - type: string - const: developer - - type: string - const: user - - type: string - const: assistant - type: - type: string - const: message - default: message - id: - type: string - status: - type: string - additionalProperties: false - required: - - content - - role - - type - title: OpenAIResponseMessage - description: >- - Corresponds to the various Message types in the Responses API. They are all - under one type because the Responses API gives them all the same "type" value, - and there is no way to tell them apart in certain scenarios. - OpenAIResponseOutputMessageContent: - type: object - properties: - text: - type: string - type: - type: string - const: output_text - default: output_text - annotations: - type: array - items: - $ref: '#/components/schemas/OpenAIResponseAnnotations' - additionalProperties: false - required: - - text - - type - - annotations - title: >- - OpenAIResponseOutputMessageContentOutputText - "OpenAIResponseOutputMessageFileSearchToolCall": - type: object - properties: - id: - type: string - description: Unique identifier for this tool call - queries: - type: array - items: - type: string - description: List of search queries executed - status: - type: string - description: >- - Current status of the file search operation - type: - type: string - const: file_search_call - default: file_search_call - description: >- - Tool call type identifier, always "file_search_call" - results: - type: array - items: - type: object - properties: - attributes: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Key-value attributes associated with the file - file_id: - type: string - description: >- - Unique identifier of the file containing the result - filename: - type: string - description: Name of the file containing the result - score: - type: number - description: >- - Relevance score for this search result (between 0 and 1) - text: - type: string - description: Text content of the search result - additionalProperties: false - required: - - attributes - - file_id - - filename - - score - - text - title: >- - OpenAIResponseOutputMessageFileSearchToolCallResults - description: >- - Search results returned by the file search operation. - description: >- - (Optional) Search results returned by the file search operation - additionalProperties: false - required: - - id - - queries - - status - - type - title: >- - OpenAIResponseOutputMessageFileSearchToolCall - description: >- - File search tool call output message for OpenAI responses. 
- "OpenAIResponseOutputMessageFunctionToolCall": - type: object - properties: - call_id: - type: string - description: Unique identifier for the function call - name: - type: string - description: Name of the function being called - arguments: - type: string - description: >- - JSON string containing the function arguments - type: - type: string - const: function_call - default: function_call - description: >- - Tool call type identifier, always "function_call" - id: - type: string - description: >- - (Optional) Additional identifier for the tool call - status: - type: string - description: >- - (Optional) Current status of the function call execution - additionalProperties: false - required: - - call_id - - name - - arguments - - type - title: >- - OpenAIResponseOutputMessageFunctionToolCall - description: >- - Function tool call output message for OpenAI responses. - "OpenAIResponseOutputMessageWebSearchToolCall": - type: object - properties: - id: - type: string - description: Unique identifier for this tool call - status: - type: string - description: >- - Current status of the web search operation - type: - type: string - const: web_search_call - default: web_search_call - description: >- - Tool call type identifier, always "web_search_call" - additionalProperties: false - required: - - id - - status - - type - title: >- - OpenAIResponseOutputMessageWebSearchToolCall - description: >- - Web search tool call output message for OpenAI responses. - OpenAIResponseText: - type: object - properties: - format: - type: object - properties: - type: - oneOf: - - type: string - const: text - - type: string - const: json_schema - - type: string - const: json_object - description: >- - Must be "text", "json_schema", or "json_object" to identify the format - type - name: - type: string - description: >- - The name of the response format. Only used for json_schema. - schema: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The JSON schema the response should conform to. In a Python SDK, this - is often a `pydantic` model. Only used for json_schema. - description: - type: string - description: >- - (Optional) A description of the response format. Only used for json_schema. - strict: - type: boolean - description: >- - (Optional) Whether to strictly enforce the JSON schema. If true, the - response must match the schema exactly. Only used for json_schema. - additionalProperties: false - required: - - type - description: >- - (Optional) Text format configuration specifying output format requirements - additionalProperties: false - title: OpenAIResponseText - description: >- - Text response configuration for OpenAI responses. CreateOpenaiResponseRequest: type: object properties: @@ -7179,24 +5427,6 @@ components: - input - model title: CreateOpenaiResponseRequest - OpenAIResponseError: - type: object - properties: - code: - type: string - description: >- - Error code identifying the type of failure - message: - type: string - description: >- - Human-readable error message describing the failure - additionalProperties: false - required: - - code - - message - title: OpenAIResponseError - description: >- - Error details for failed OpenAI response requests. OpenAIResponseObject: type: object properties: @@ -7268,125 +5498,6 @@ components: title: OpenAIResponseObject description: >- Complete OpenAI response object containing generation results and metadata. 
- OpenAIResponseOutput: - oneOf: - - $ref: '#/components/schemas/OpenAIResponseMessage' - - $ref: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' - - $ref: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - - $ref: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' - - $ref: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' - - $ref: '#/components/schemas/OpenAIResponseMCPApprovalRequest' - discriminator: - propertyName: type - mapping: - message: '#/components/schemas/OpenAIResponseMessage' - web_search_call: '#/components/schemas/OpenAIResponseOutputMessageWebSearchToolCall' - file_search_call: '#/components/schemas/OpenAIResponseOutputMessageFileSearchToolCall' - function_call: '#/components/schemas/OpenAIResponseOutputMessageFunctionToolCall' - mcp_call: '#/components/schemas/OpenAIResponseOutputMessageMCPCall' - mcp_list_tools: '#/components/schemas/OpenAIResponseOutputMessageMCPListTools' - mcp_approval_request: '#/components/schemas/OpenAIResponseMCPApprovalRequest' - OpenAIResponseOutputMessageMCPCall: - type: object - properties: - id: - type: string - description: Unique identifier for this MCP call - type: - type: string - const: mcp_call - default: mcp_call - description: >- - Tool call type identifier, always "mcp_call" - arguments: - type: string - description: >- - JSON string containing the MCP call arguments - name: - type: string - description: Name of the MCP method being called - server_label: - type: string - description: >- - Label identifying the MCP server handling the call - error: - type: string - description: >- - (Optional) Error message if the MCP call failed - output: - type: string - description: >- - (Optional) Output result from the successful MCP call - additionalProperties: false - required: - - id - - type - - arguments - - name - - server_label - title: OpenAIResponseOutputMessageMCPCall - description: >- - Model Context Protocol (MCP) call output message for OpenAI responses. - OpenAIResponseOutputMessageMCPListTools: - type: object - properties: - id: - type: string - description: >- - Unique identifier for this MCP list tools operation - type: - type: string - const: mcp_list_tools - default: mcp_list_tools - description: >- - Tool call type identifier, always "mcp_list_tools" - server_label: - type: string - description: >- - Label identifying the MCP server providing the tools - tools: - type: array - items: - type: object - properties: - input_schema: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - JSON schema defining the tool's input parameters - name: - type: string - description: Name of the tool - description: - type: string - description: >- - (Optional) Description of what the tool does - additionalProperties: false - required: - - input_schema - - name - title: MCPListToolsTool - description: >- - Tool definition returned by MCP list tools operation. - description: >- - List of available tools provided by the MCP server - additionalProperties: false - required: - - id - - type - - server_label - - tools - title: OpenAIResponseOutputMessageMCPListTools - description: >- - MCP list tools output message containing available tools from an MCP server. 
OpenAIResponseContentPartOutputText: type: object properties: @@ -8088,61 +6199,6 @@ components: - type title: >- OpenAIResponseObjectStreamResponseWebSearchCallSearching - CreatePromptRequest: - type: object - properties: - prompt: - type: string - description: >- - The prompt text content with variable placeholders. - variables: - type: array - items: - type: string - description: >- - List of variable names that can be used in the prompt template. - additionalProperties: false - required: - - prompt - title: CreatePromptRequest - Prompt: - type: object - properties: - prompt: - type: string - description: >- - The system prompt text with variable placeholders. Variables are only - supported when using the Responses API. - version: - type: integer - description: >- - Version (integer starting at 1, incremented on save) - prompt_id: - type: string - description: >- - Unique identifier formatted as 'pmpt_<48-digit-hash>' - variables: - type: array - items: - type: string - description: >- - List of prompt variable names that can be used in the prompt template - is_default: - type: boolean - default: false - description: >- - Boolean indicating whether this version is the default version for this - prompt - additionalProperties: false - required: - - version - - prompt_id - - variables - - is_default - title: Prompt - description: >- - A prompt resource representing a stored OpenAI Compatible prompt template - in Llama Stack. OpenAIDeleteResponseObject: type: object properties: @@ -8168,200 +6224,124 @@ components: title: OpenAIDeleteResponseObject description: >- Response object confirming deletion of an OpenAI response. - AgentCandidate: + ListOpenAIResponseInputItem: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/OpenAIResponseInput' + description: List of input items + object: + type: string + const: list + default: list + description: Object type identifier, always "list" + additionalProperties: false + required: + - data + - object + title: ListOpenAIResponseInputItem + description: >- + List container for OpenAI response input items. + CompletionMessage: + type: object + properties: + role: + type: string + const: assistant + default: assistant + description: >- + Must be "assistant" to identify this as the model's response + content: + $ref: '#/components/schemas/InterleavedContent' + description: The content of the model's response + stop_reason: + type: string + enum: + - end_of_turn + - end_of_message + - out_of_tokens + description: >- + Reason why the model stopped generating. Options are: - `StopReason.end_of_turn`: + The model finished generating the entire response. - `StopReason.end_of_message`: + The model finished generating but generated a partial response -- usually, + a tool call. The user may call the tool and continue the conversation + with the tool's response. - `StopReason.out_of_tokens`: The model ran + out of token budget. + tool_calls: + type: array + items: + $ref: '#/components/schemas/ToolCall' + description: >- + List of tool calls. Each tool call is a ToolCall object. + additionalProperties: false + required: + - role + - content + - stop_reason + title: CompletionMessage + description: >- + A message containing the model's (assistant) response in a chat conversation. + ImageContentItem: type: object properties: type: type: string - const: agent - default: agent - config: - $ref: '#/components/schemas/AgentConfig' + const: image + default: image description: >- - The configuration for the agent candidate. 
- additionalProperties: false - required: - - type - - config - title: AgentCandidate - description: An agent candidate for evaluation. - AggregationFunctionType: - type: string - enum: - - average - - weighted_average - - median - - categorical_count - - accuracy - title: AggregationFunctionType - description: >- - Types of aggregation functions for scoring results. - BasicScoringFnParams: - type: object - properties: - type: - $ref: '#/components/schemas/ScoringFnParamsType' - const: basic - default: basic - description: >- - The type of scoring function parameters, always basic - aggregation_functions: - type: array - items: - $ref: '#/components/schemas/AggregationFunctionType' - description: >- - Aggregation functions to apply to the scores of each row - additionalProperties: false - required: - - type - - aggregation_functions - title: BasicScoringFnParams - description: >- - Parameters for basic scoring function configuration. - BenchmarkConfig: - type: object - properties: - eval_candidate: - oneOf: - - $ref: '#/components/schemas/ModelCandidate' - - $ref: '#/components/schemas/AgentCandidate' - discriminator: - propertyName: type - mapping: - model: '#/components/schemas/ModelCandidate' - agent: '#/components/schemas/AgentCandidate' - description: The candidate to evaluate. - scoring_params: + Discriminator type of the content item. Always "image" + image: type: object - additionalProperties: - $ref: '#/components/schemas/ScoringFnParams' + properties: + url: + $ref: '#/components/schemas/URL' + description: >- + A URL of the image or data URL in the format of data:image/{type};base64,{data}. + Note that URL could have length limits. + data: + type: string + contentEncoding: base64 + description: base64 encoded image data as string + additionalProperties: false description: >- - Map between scoring function id and parameters for each scoring function - you want to run - num_examples: - type: integer - description: >- - (Optional) The number of examples to evaluate. If not provided, all examples - in the dataset will be evaluated - additionalProperties: false - required: - - eval_candidate - - scoring_params - title: BenchmarkConfig - description: >- - A benchmark configuration for evaluation. - LLMAsJudgeScoringFnParams: - type: object - properties: - type: - $ref: '#/components/schemas/ScoringFnParamsType' - const: llm_as_judge - default: llm_as_judge - description: >- - The type of scoring function parameters, always llm_as_judge - judge_model: - type: string - description: >- - Identifier of the LLM model to use as a judge for scoring - prompt_template: - type: string - description: >- - (Optional) Custom prompt template for the judge model - judge_score_regexes: - type: array - items: - type: string - description: >- - Regexes to extract the answer from generated response - aggregation_functions: - type: array - items: - $ref: '#/components/schemas/AggregationFunctionType' - description: >- - Aggregation functions to apply to the scores of each row + Image as a base64 encoded string or an URL additionalProperties: false required: - type - - judge_model - - judge_score_regexes - - aggregation_functions - title: LLMAsJudgeScoringFnParams - description: >- - Parameters for LLM-as-judge scoring function configuration. - ModelCandidate: - type: object - properties: - type: - type: string - const: model - default: model - model: - type: string - description: The model ID to evaluate. 
- sampling_params: - $ref: '#/components/schemas/SamplingParams' - description: The sampling parameters for the model. - system_message: - $ref: '#/components/schemas/SystemMessage' - description: >- - (Optional) The system message providing instructions or context to the - model. - additionalProperties: false - required: - - type - - model - - sampling_params - title: ModelCandidate - description: A model candidate for evaluation. - RegexParserScoringFnParams: - type: object - properties: - type: - $ref: '#/components/schemas/ScoringFnParamsType' - const: regex_parser - default: regex_parser - description: >- - The type of scoring function parameters, always regex_parser - parsing_regexes: - type: array - items: - type: string - description: >- - Regex to extract the answer from generated response - aggregation_functions: - type: array - items: - $ref: '#/components/schemas/AggregationFunctionType' - description: >- - Aggregation functions to apply to the scores of each row - additionalProperties: false - required: - - type - - parsing_regexes - - aggregation_functions - title: RegexParserScoringFnParams - description: >- - Parameters for regex parser scoring function configuration. - ScoringFnParams: + - image + title: ImageContentItem + description: A image content item + InterleavedContent: oneOf: - - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' - - $ref: '#/components/schemas/RegexParserScoringFnParams' - - $ref: '#/components/schemas/BasicScoringFnParams' + - type: string + - $ref: '#/components/schemas/InterleavedContentItem' + - type: array + items: + $ref: '#/components/schemas/InterleavedContentItem' + InterleavedContentItem: + oneOf: + - $ref: '#/components/schemas/ImageContentItem' + - $ref: '#/components/schemas/TextContentItem' discriminator: propertyName: type mapping: - llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' - regex_parser: '#/components/schemas/RegexParserScoringFnParams' - basic: '#/components/schemas/BasicScoringFnParams' - ScoringFnParamsType: - type: string - enum: - - llm_as_judge - - regex_parser - - basic - title: ScoringFnParamsType - description: >- - Types of scoring function parameter configurations. + image: '#/components/schemas/ImageContentItem' + text: '#/components/schemas/TextContentItem' + Message: + oneOf: + - $ref: '#/components/schemas/UserMessage' + - $ref: '#/components/schemas/SystemMessage' + - $ref: '#/components/schemas/ToolResponseMessage' + - $ref: '#/components/schemas/CompletionMessage' + discriminator: + propertyName: role + mapping: + user: '#/components/schemas/UserMessage' + system: '#/components/schemas/SystemMessage' + tool: '#/components/schemas/ToolResponseMessage' + assistant: '#/components/schemas/CompletionMessage' SystemMessage: type: object properties: @@ -8384,295 +6364,7 @@ components: title: SystemMessage description: >- A system message providing instructions or context to the model. - EvaluateRowsRequest: - type: object - properties: - input_rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The rows to evaluate. - scoring_functions: - type: array - items: - type: string - description: >- - The scoring functions to use for the evaluation. - benchmark_config: - $ref: '#/components/schemas/BenchmarkConfig' - description: The configuration for the benchmark. 
- additionalProperties: false - required: - - input_rows - - scoring_functions - - benchmark_config - title: EvaluateRowsRequest - EvaluateResponse: - type: object - properties: - generations: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The generations from the evaluation. - scores: - type: object - additionalProperties: - $ref: '#/components/schemas/ScoringResult' - description: The scores from the evaluation. - additionalProperties: false - required: - - generations - - scores - title: EvaluateResponse - description: The response from an evaluation. - ScoringResult: - type: object - properties: - score_rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The scoring result for each row. Each row is a map of column name to value. - aggregated_results: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: Map of metric name to aggregated value - additionalProperties: false - required: - - score_rows - - aggregated_results - title: ScoringResult - description: A scoring result for a single row. - Agent: - type: object - properties: - agent_id: - type: string - description: Unique identifier for the agent - agent_config: - $ref: '#/components/schemas/AgentConfig' - description: Configuration settings for the agent - created_at: - type: string - format: date-time - description: Timestamp when the agent was created - additionalProperties: false - required: - - agent_id - - agent_config - - created_at - title: Agent - description: >- - An agent instance with configuration and metadata. - Session: - type: object - properties: - session_id: - type: string - description: >- - Unique identifier for the conversation session - session_name: - type: string - description: Human-readable name for the session - turns: - type: array - items: - $ref: '#/components/schemas/Turn' - description: >- - List of all turns that have occurred in this session - started_at: - type: string - format: date-time - description: Timestamp when the session was created - additionalProperties: false - required: - - session_id - - session_name - - turns - - started_at - title: Session - description: >- - A single session of an interaction with an Agentic System. - AgentStepResponse: - type: object - properties: - step: - oneOf: - - $ref: '#/components/schemas/InferenceStep' - - $ref: '#/components/schemas/ToolExecutionStep' - - $ref: '#/components/schemas/ShieldCallStep' - - $ref: '#/components/schemas/MemoryRetrievalStep' - discriminator: - propertyName: step_type - mapping: - inference: '#/components/schemas/InferenceStep' - tool_execution: '#/components/schemas/ToolExecutionStep' - shield_call: '#/components/schemas/ShieldCallStep' - memory_retrieval: '#/components/schemas/MemoryRetrievalStep' - description: >- - The complete step data and execution details - additionalProperties: false - required: - - step - title: AgentStepResponse - description: >- - Response containing details of a specific agent step. 
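A minimal hypothetical CompletionMessage using the plain-string form of InterleavedContent; the reply text is invented.

```yaml
# Hypothetical CompletionMessage (assistant reply with no tool calls)
role: assistant
content: "The capital of France is Paris."
stop_reason: end_of_turn   # one of: end_of_turn, end_of_message, out_of_tokens
```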
- Benchmark: - type: object - properties: - identifier: - type: string - provider_resource_id: - type: string - provider_id: - type: string - type: - type: string - enum: - - model - - shield - - vector_db - - dataset - - scoring_function - - benchmark - - tool - - tool_group - - prompt - const: benchmark - default: benchmark - description: The resource type, always benchmark - dataset_id: - type: string - description: >- - Identifier of the dataset to use for the benchmark evaluation - scoring_functions: - type: array - items: - type: string - description: >- - List of scoring function identifiers to apply during evaluation - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: Metadata for this evaluation task - additionalProperties: false - required: - - identifier - - provider_id - - type - - dataset_id - - scoring_functions - - metadata - title: Benchmark - description: >- - A benchmark resource for evaluating model performance. - OpenAIAssistantMessageParam: - type: object - properties: - role: - type: string - const: assistant - default: assistant - description: >- - Must be "assistant" to identify this as the model's response - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - description: The content of the model's response - name: - type: string - description: >- - (Optional) The name of the assistant message participant. - tool_calls: - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionToolCall' - description: >- - List of tool calls. Each tool call is an OpenAIChatCompletionToolCall - object. - additionalProperties: false - required: - - role - title: OpenAIAssistantMessageParam - description: >- - A message containing the model's (assistant) response in an OpenAI-compatible - chat completion request. - "OpenAIChatCompletionContentPartImageParam": - type: object - properties: - type: - type: string - const: image_url - default: image_url - description: >- - Must be "image_url" to identify this as image content - image_url: - $ref: '#/components/schemas/OpenAIImageURL' - description: >- - Image URL specification and processing details - additionalProperties: false - required: - - type - - image_url - title: >- - OpenAIChatCompletionContentPartImageParam - description: >- - Image content part for OpenAI-compatible chat completion messages. - OpenAIChatCompletionContentPartParam: - oneOf: - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' - - $ref: '#/components/schemas/OpenAIFile' - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - image_url: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' - file: '#/components/schemas/OpenAIFile' - OpenAIChatCompletionContentPartTextParam: + TextContentItem: type: object properties: type: @@ -8680,254 +6372,67 @@ components: const: text default: text description: >- - Must be "text" to identify this as text content + Discriminator type of the content item. 
Always "text" text: type: string - description: The text content of the message + description: Text content additionalProperties: false required: - type - text - title: OpenAIChatCompletionContentPartTextParam - description: >- - Text content part for OpenAI-compatible chat completion messages. - OpenAIChatCompletionToolCall: + title: TextContentItem + description: A text content item + ToolCall: type: object properties: - index: - type: integer - description: >- - (Optional) Index of the tool call in the list - id: + call_id: type: string - description: >- - (Optional) Unique identifier for the tool call - type: - type: string - const: function - default: function - description: >- - Must be "function" to identify this as a function call - function: - $ref: '#/components/schemas/OpenAIChatCompletionToolCallFunction' - description: (Optional) Function call details - additionalProperties: false - required: - - type - title: OpenAIChatCompletionToolCall - description: >- - Tool call specification for OpenAI-compatible chat completion responses. - OpenAIChatCompletionToolCallFunction: - type: object - properties: - name: - type: string - description: (Optional) Name of the function to call + tool_name: + oneOf: + - type: string + enum: + - brave_search + - wolfram_alpha + - photogen + - code_interpreter + title: BuiltinTool + - type: string arguments: - type: string - description: >- - (Optional) Arguments to pass to the function as a JSON string - additionalProperties: false - title: OpenAIChatCompletionToolCallFunction - description: >- - Function call details for OpenAI-compatible tool calls. - OpenAIChoice: - type: object - properties: - message: - oneOf: - - $ref: '#/components/schemas/OpenAIUserMessageParam' - - $ref: '#/components/schemas/OpenAISystemMessageParam' - - $ref: '#/components/schemas/OpenAIAssistantMessageParam' - - $ref: '#/components/schemas/OpenAIToolMessageParam' - - $ref: '#/components/schemas/OpenAIDeveloperMessageParam' - discriminator: - propertyName: role - mapping: - user: '#/components/schemas/OpenAIUserMessageParam' - system: '#/components/schemas/OpenAISystemMessageParam' - assistant: '#/components/schemas/OpenAIAssistantMessageParam' - tool: '#/components/schemas/OpenAIToolMessageParam' - developer: '#/components/schemas/OpenAIDeveloperMessageParam' - description: The message from the model - finish_reason: - type: string - description: The reason the model stopped generating - index: - type: integer - description: The index of the choice - logprobs: - $ref: '#/components/schemas/OpenAIChoiceLogprobs' - description: >- - (Optional) The log probabilities for the tokens in the message - additionalProperties: false - required: - - message - - finish_reason - - index - title: OpenAIChoice - description: >- - A choice from an OpenAI-compatible chat completion response. - OpenAIChoiceLogprobs: - type: object - properties: - content: - type: array - items: - $ref: '#/components/schemas/OpenAITokenLogProb' - description: >- - (Optional) The log probabilities for the tokens in the message - refusal: - type: array - items: - $ref: '#/components/schemas/OpenAITokenLogProb' - description: >- - (Optional) The log probabilities for the tokens in the message - additionalProperties: false - title: OpenAIChoiceLogprobs - description: >- - The log probabilities for the tokens in the message from an OpenAI-compatible - chat completion response. 
- OpenAIDeveloperMessageParam: - type: object - properties: - role: - type: string - const: developer - default: developer - description: >- - Must be "developer" to identify this as a developer message - content: oneOf: - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - description: The content of the developer message - name: + - type: object + additionalProperties: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + - type: array + items: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + - type: object + additionalProperties: + oneOf: + - type: string + - type: integer + - type: number + - type: boolean + - type: 'null' + arguments_json: type: string - description: >- - (Optional) The name of the developer message participant. additionalProperties: false required: - - role - - content - title: OpenAIDeveloperMessageParam - description: >- - A message from the developer in an OpenAI-compatible chat completion request. - OpenAIFile: - type: object - properties: - type: - type: string - const: file - default: file - file: - $ref: '#/components/schemas/OpenAIFileFile' - additionalProperties: false - required: - - type - - file - title: OpenAIFile - OpenAIFileFile: - type: object - properties: - file_data: - type: string - file_id: - type: string - filename: - type: string - additionalProperties: false - title: OpenAIFileFile - OpenAIImageURL: - type: object - properties: - url: - type: string - description: >- - URL of the image to include in the message - detail: - type: string - description: >- - (Optional) Level of detail for image processing. Can be "low", "high", - or "auto" - additionalProperties: false - required: - - url - title: OpenAIImageURL - description: >- - Image URL specification for OpenAI-compatible chat completion messages. - OpenAIMessageParam: - oneOf: - - $ref: '#/components/schemas/OpenAIUserMessageParam' - - $ref: '#/components/schemas/OpenAISystemMessageParam' - - $ref: '#/components/schemas/OpenAIAssistantMessageParam' - - $ref: '#/components/schemas/OpenAIToolMessageParam' - - $ref: '#/components/schemas/OpenAIDeveloperMessageParam' - discriminator: - propertyName: role - mapping: - user: '#/components/schemas/OpenAIUserMessageParam' - system: '#/components/schemas/OpenAISystemMessageParam' - assistant: '#/components/schemas/OpenAIAssistantMessageParam' - tool: '#/components/schemas/OpenAIToolMessageParam' - developer: '#/components/schemas/OpenAIDeveloperMessageParam' - OpenAISystemMessageParam: - type: object - properties: - role: - type: string - const: system - default: system - description: >- - Must be "system" to identify this as a system message - content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - description: >- - The content of the "system prompt". If multiple system messages are provided, - they are concatenated. The underlying Llama Stack code may also add other - system messages (for example, for formatting tool definitions). - name: - type: string - description: >- - (Optional) The name of the system message participant. - additionalProperties: false - required: - - role - - content - title: OpenAISystemMessageParam - description: >- - A system message providing instructions or context to the model. 
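A hypothetical ToolCall for a user-defined tool, shown with the object form of `arguments` and the optional `arguments_json` mirror; the tool name and parameters are invented.

```yaml
# Hypothetical ToolCall (tool_name uses the free-form string branch)
call_id: call_001
tool_name: get_weather
arguments:
  city: Paris
  unit: celsius
arguments_json: '{"city": "Paris", "unit": "celsius"}'
```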
- OpenAITokenLogProb: - type: object - properties: - token: - type: string - bytes: - type: array - items: - type: integer - logprob: - type: number - top_logprobs: - type: array - items: - $ref: '#/components/schemas/OpenAITopLogProb' - additionalProperties: false - required: - - token - - logprob - - top_logprobs - title: OpenAITokenLogProb - description: >- - The log probability for a token from an OpenAI-compatible chat completion - response. - OpenAIToolMessageParam: + - call_id + - tool_name + - arguments + title: ToolCall + ToolResponseMessage: type: object properties: role: @@ -8936,46 +6441,33 @@ components: default: tool description: >- Must be "tool" to identify this as a tool response - tool_call_id: + call_id: type: string description: >- Unique identifier for the tool call this response is for content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' + $ref: '#/components/schemas/InterleavedContent' description: The response content from the tool additionalProperties: false required: - role - - tool_call_id + - call_id - content - title: OpenAIToolMessageParam + title: ToolResponseMessage description: >- - A message representing the result of a tool invocation in an OpenAI-compatible - chat completion request. - OpenAITopLogProb: + A message representing the result of a tool invocation. + URL: type: object properties: - token: + uri: type: string - bytes: - type: array - items: - type: integer - logprob: - type: number + description: The URL string pointing to the resource additionalProperties: false required: - - token - - logprob - title: OpenAITopLogProb - description: >- - The top log probability for a token from an OpenAI-compatible chat completion - response. - OpenAIUserMessageParam: + - uri + title: URL + description: A URL reference to external content. + UserMessage: type: object properties: role: @@ -8985,106 +6477,69 @@ components: description: >- Must be "user" to identify this as a user message content: - oneOf: - - type: string - - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionContentPartParam' + $ref: '#/components/schemas/InterleavedContent' description: >- The content of the message, which can include text and other media - name: - type: string + context: + $ref: '#/components/schemas/InterleavedContent' description: >- - (Optional) The name of the user message participant. + (Optional) This field is used internally by Llama Stack to pass RAG context. + This field may be removed in the API in the future. additionalProperties: false required: - role - content - title: OpenAIUserMessageParam + title: UserMessage description: >- - A message from the user in an OpenAI-compatible chat completion request. - OpenAICompletionWithInputMessages: + A message from the user in a chat conversation. + RunShieldRequest: type: object properties: - id: + shield_id: type: string - description: The ID of the chat completion - choices: + description: The identifier of the shield to run. 
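For reference, a minimal pair of payloads conforming to the new `ToolCall` and `ToolResponseMessage` schemas above might look as follows. The identifiers, tool name, and result text are illustrative, and the plain-string `content` assumes the simplest form of the `InterleavedContent` union.

```yaml
# Hypothetical tool call produced by the model (ToolCall)
call_id: "call_123"                      # illustrative identifier
tool_name: brave_search                  # a BuiltinTool value, or any custom tool name
arguments:
  query: "latest Llama Stack release"
arguments_json: '{"query": "latest Llama Stack release"}'
---
# Matching tool result returned to the model (ToolResponseMessage)
role: tool
call_id: "call_123"
content: "Search results text ..."       # InterleavedContent; a plain string is the simplest form
```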
+ messages: type: array items: - $ref: '#/components/schemas/OpenAIChoice' - description: List of choices - object: - type: string - const: chat.completion - default: chat.completion - description: >- - The object type, which will be "chat.completion" - created: - type: integer - description: >- - The Unix timestamp in seconds when the chat completion was created - model: - type: string - description: >- - The model that was used to generate the chat completion - input_messages: - type: array - items: - $ref: '#/components/schemas/OpenAIMessageParam' + $ref: '#/components/schemas/Message' + description: The messages to run the shield on. + params: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The parameters of the shield. additionalProperties: false required: - - id - - choices - - object - - created - - model - - input_messages - title: OpenAICompletionWithInputMessages - Dataset: + - shield_id + - messages + - params + title: RunShieldRequest + RunShieldResponse: type: object properties: - identifier: - type: string - provider_resource_id: - type: string - provider_id: - type: string - type: - type: string - enum: - - model - - shield - - vector_db - - dataset - - scoring_function - - benchmark - - tool - - tool_group - - prompt - const: dataset - default: dataset + violation: + $ref: '#/components/schemas/SafetyViolation' description: >- - Type of resource, always 'dataset' for datasets - purpose: + (Optional) Safety violation detected by the shield, if any + additionalProperties: false + title: RunShieldResponse + description: Response from running a safety shield. + SafetyViolation: + type: object + properties: + violation_level: + $ref: '#/components/schemas/ViolationLevel' + description: Severity level of the violation + user_message: type: string - enum: - - post-training/messages - - eval/question-answer - - eval/messages-answer description: >- - Purpose of the dataset indicating its intended use - source: - oneOf: - - $ref: '#/components/schemas/URIDataSource' - - $ref: '#/components/schemas/RowsDataSource' - discriminator: - propertyName: type - mapping: - uri: '#/components/schemas/URIDataSource' - rows: '#/components/schemas/RowsDataSource' - description: >- - Data source configuration for the dataset + (Optional) Message to convey to the user about the violation metadata: type: object additionalProperties: @@ -9095,131 +6550,24 @@ components: - type: string - type: array - type: object - description: Additional metadata for the dataset + description: >- + Additional metadata including specific violation codes for debugging and + telemetry additionalProperties: false required: - - identifier - - provider_id - - type - - purpose - - source + - violation_level - metadata - title: Dataset + title: SafetyViolation description: >- - Dataset resource for storing and accessing training or evaluation data. - RowsDataSource: - type: object - properties: - type: - type: string - const: rows - default: rows - rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The dataset is stored in rows. E.g. 
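As a sketch, here is a request/response pair for the shield-run operation, conforming to `RunShieldRequest` and `RunShieldResponse` above. The shield identifier, message content, and violation details are placeholders; `violation_level` takes one of the `ViolationLevel` values defined just below.

```yaml
# RunShieldRequest (illustrative values)
shield_id: "content-safety"              # hypothetical shield identifier
messages:
  - role: user
    content: "How do I pick a lock?"
params: {}                               # provider-specific shield parameters
---
# RunShieldResponse reporting a violation
violation:
  violation_level: error                 # ViolationLevel: info | warn | error
  user_message: "I can't help with that request."
  metadata:
    violation_code: "S7"                 # illustrative provider-specific code
```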
- [ {"messages": [{"role": "user", - "content": "Hello, world!"}, {"role": "assistant", "content": "Hello, - world!"}]} ] - additionalProperties: false - required: - - type - - rows - title: RowsDataSource - description: A dataset stored in rows. - URIDataSource: - type: object - properties: - type: - type: string - const: uri - default: uri - uri: - type: string - description: >- - The dataset can be obtained from a URI. E.g. - "https://mywebsite.com/mydata.jsonl" - - "lsfs://mydata.jsonl" - "data:csv;base64,{base64_content}" - additionalProperties: false - required: - - type - - uri - title: URIDataSource - description: >- - A dataset that can be obtained from a URI. - Model: - type: object - properties: - identifier: - type: string - description: >- - Unique identifier for this resource in llama stack - provider_resource_id: - type: string - description: >- - Unique identifier for this resource in the provider - provider_id: - type: string - description: >- - ID of the provider that owns this resource - type: - type: string - enum: - - model - - shield - - vector_db - - dataset - - scoring_function - - benchmark - - tool - - tool_group - - prompt - const: model - default: model - description: >- - The resource type, always 'model' for model resources - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: Any additional metadata for this model - model_type: - $ref: '#/components/schemas/ModelType' - default: llm - description: >- - The type of model (LLM or embedding model) - additionalProperties: false - required: - - identifier - - provider_id - - type - - metadata - - model_type - title: Model - description: >- - A model resource representing an AI model registered in Llama Stack. - ModelType: + Details of a safety violation detected by content moderation. + ViolationLevel: type: string enum: - - llm - - embedding - title: ModelType - description: >- - Enumeration of supported model types in Llama Stack. + - info + - warn + - error + title: ViolationLevel + description: Severity level of a safety violation. AgentTurnInputType: type: object properties: @@ -9234,6 +6582,17 @@ components: - type title: AgentTurnInputType description: Parameter type for agent turn input. + AggregationFunctionType: + type: string + enum: + - average + - weighted_average + - median + - categorical_count + - accuracy + title: AggregationFunctionType + description: >- + Types of aggregation functions for scoring results. ArrayType: type: object properties: @@ -9247,6 +6606,28 @@ components: - type title: ArrayType description: Parameter type for array values. + BasicScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: basic + default: basic + description: >- + The type of scoring function parameters, always basic + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - aggregation_functions + title: BasicScoringFnParams + description: >- + Parameters for basic scoring function configuration. BooleanType: type: object properties: @@ -9302,6 +6683,44 @@ components: - type title: JsonType description: Parameter type for JSON values. 
+ LLMAsJudgeScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: llm_as_judge + default: llm_as_judge + description: >- + The type of scoring function parameters, always llm_as_judge + judge_model: + type: string + description: >- + Identifier of the LLM model to use as a judge for scoring + prompt_template: + type: string + description: >- + (Optional) Custom prompt template for the judge model + judge_score_regexes: + type: array + items: + type: string + description: >- + Regexes to extract the answer from generated response + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - judge_model + - judge_score_regexes + - aggregation_functions + title: LLMAsJudgeScoringFnParams + description: >- + Parameters for LLM-as-judge scoring function configuration. NumberType: type: object properties: @@ -9328,6 +6747,35 @@ components: - type title: ObjectType description: Parameter type for object values. + RegexParserScoringFnParams: + type: object + properties: + type: + $ref: '#/components/schemas/ScoringFnParamsType' + const: regex_parser + default: regex_parser + description: >- + The type of scoring function parameters, always regex_parser + parsing_regexes: + type: array + items: + type: string + description: >- + Regex to extract the answer from generated response + aggregation_functions: + type: array + items: + $ref: '#/components/schemas/AggregationFunctionType' + description: >- + Aggregation functions to apply to the scores of each row + additionalProperties: false + required: + - type + - parsing_regexes + - aggregation_functions + title: RegexParserScoringFnParams + description: >- + Parameters for regex parser scoring function configuration. ScoringFn: type: object properties: @@ -9402,6 +6850,26 @@ components: title: ScoringFn description: >- A scoring function resource for evaluating model outputs. + ScoringFnParams: + oneOf: + - $ref: '#/components/schemas/LLMAsJudgeScoringFnParams' + - $ref: '#/components/schemas/RegexParserScoringFnParams' + - $ref: '#/components/schemas/BasicScoringFnParams' + discriminator: + propertyName: type + mapping: + llm_as_judge: '#/components/schemas/LLMAsJudgeScoringFnParams' + regex_parser: '#/components/schemas/RegexParserScoringFnParams' + basic: '#/components/schemas/BasicScoringFnParams' + ScoringFnParamsType: + type: string + enum: + - llm_as_judge + - regex_parser + - basic + title: ScoringFnParamsType + description: >- + Types of scoring function parameter configurations. StringType: type: object properties: @@ -9428,6 +6896,194 @@ components: - type title: UnionType description: Parameter type for union values. 
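Because `ScoringFnParams` is a discriminated union on `type`, the three variants are told apart by that field alone. Two illustrative values follow; the judge model, prompt template, and regexes are placeholders.

```yaml
# llm_as_judge variant (LLMAsJudgeScoringFnParams)
type: llm_as_judge
judge_model: "meta-llama/Llama-3.1-8B-Instruct"   # illustrative judge model identifier
prompt_template: "Rate the answer from 1 to 5: {generated_answer}"
judge_score_regexes:
  - 'Score: (\d)'
aggregation_functions:
  - average
---
# regex_parser variant (RegexParserScoringFnParams)
type: regex_parser
parsing_regexes:
  - 'Answer: (.*)'
aggregation_functions:
  - accuracy
```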
+ ListScoringFunctionsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/ScoringFn' + additionalProperties: false + required: + - data + title: ListScoringFunctionsResponse + ParamType: + oneOf: + - $ref: '#/components/schemas/StringType' + - $ref: '#/components/schemas/NumberType' + - $ref: '#/components/schemas/BooleanType' + - $ref: '#/components/schemas/ArrayType' + - $ref: '#/components/schemas/ObjectType' + - $ref: '#/components/schemas/JsonType' + - $ref: '#/components/schemas/UnionType' + - $ref: '#/components/schemas/ChatCompletionInputType' + - $ref: '#/components/schemas/CompletionInputType' + - $ref: '#/components/schemas/AgentTurnInputType' + discriminator: + propertyName: type + mapping: + string: '#/components/schemas/StringType' + number: '#/components/schemas/NumberType' + boolean: '#/components/schemas/BooleanType' + array: '#/components/schemas/ArrayType' + object: '#/components/schemas/ObjectType' + json: '#/components/schemas/JsonType' + union: '#/components/schemas/UnionType' + chat_completion_input: '#/components/schemas/ChatCompletionInputType' + completion_input: '#/components/schemas/CompletionInputType' + agent_turn_input: '#/components/schemas/AgentTurnInputType' + RegisterScoringFunctionRequest: + type: object + properties: + scoring_fn_id: + type: string + description: >- + The ID of the scoring function to register. + description: + type: string + description: The description of the scoring function. + return_type: + $ref: '#/components/schemas/ParamType' + description: The return type of the scoring function. + provider_scoring_fn_id: + type: string + description: >- + The ID of the provider scoring function to use for the scoring function. + provider_id: + type: string + description: >- + The ID of the provider to use for the scoring function. + params: + $ref: '#/components/schemas/ScoringFnParams' + description: >- + The parameters for the scoring function for benchmark eval, these can + be overridden for app eval. + additionalProperties: false + required: + - scoring_fn_id + - description + - return_type + title: RegisterScoringFunctionRequest + ScoreRequest: + type: object + properties: + input_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The rows to score. + scoring_functions: + type: object + additionalProperties: + oneOf: + - $ref: '#/components/schemas/ScoringFnParams' + - type: 'null' + description: >- + The scoring functions to use for the scoring. + additionalProperties: false + required: + - input_rows + - scoring_functions + title: ScoreRequest + ScoreResponse: + type: object + properties: + results: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringResult' + description: >- + A map of scoring function name to ScoringResult. + additionalProperties: false + required: + - results + title: ScoreResponse + description: The response from scoring. + ScoringResult: + type: object + properties: + score_rows: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The scoring result for each row. Each row is a map of column name to value. 
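Putting these together, a `ScoreRequest` body might look like the sketch below. The row fields and scoring-function identifiers are illustrative; mapping a function to `null` uses its registered parameters, while an inline `ScoringFnParams` value overrides them.

```yaml
input_rows:
  - input_query: "What is the capital of France?"
    generated_answer: "Paris"
    expected_answer: "Paris"
scoring_functions:
  basic::subset_of: null                          # illustrative id; use registered params
  llm_as_judge::quality:                          # illustrative id with an inline override
    type: llm_as_judge
    judge_model: "meta-llama/Llama-3.1-8B-Instruct"
    judge_score_regexes:
      - 'Score: (\d)'
    aggregation_functions:
      - average
```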
+ aggregated_results: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: Map of metric name to aggregated value + additionalProperties: false + required: + - score_rows + - aggregated_results + title: ScoringResult + description: A scoring result for a single row. + ScoreBatchRequest: + type: object + properties: + dataset_id: + type: string + description: The ID of the dataset to score. + scoring_functions: + type: object + additionalProperties: + oneOf: + - $ref: '#/components/schemas/ScoringFnParams' + - type: 'null' + description: >- + The scoring functions to use for the scoring. + save_results_dataset: + type: boolean + description: >- + Whether to save the results to a dataset. + additionalProperties: false + required: + - dataset_id + - scoring_functions + - save_results_dataset + title: ScoreBatchRequest + ScoreBatchResponse: + type: object + properties: + dataset_id: + type: string + description: >- + (Optional) The identifier of the dataset that was scored + results: + type: object + additionalProperties: + $ref: '#/components/schemas/ScoringResult' + description: >- + A map of scoring function name to ScoringResult + additionalProperties: false + required: + - results + title: ScoreBatchResponse + description: >- + Response from batch scoring operations on datasets. Shield: type: object properties: @@ -9472,1163 +7128,6 @@ components: title: Shield description: >- A safety shield resource that can be used to check content. - Span: - type: object - properties: - span_id: - type: string - description: Unique identifier for the span - trace_id: - type: string - description: >- - Unique identifier for the trace this span belongs to - parent_span_id: - type: string - description: >- - (Optional) Unique identifier for the parent span, if this is a child span - name: - type: string - description: >- - Human-readable name describing the operation this span represents - start_time: - type: string - format: date-time - description: Timestamp when the operation began - end_time: - type: string - format: date-time - description: >- - (Optional) Timestamp when the operation finished, if completed - attributes: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Key-value pairs containing additional metadata about the span - additionalProperties: false - required: - - span_id - - trace_id - - name - - start_time - title: Span - description: >- - A span representing a single operation within a trace. - GetSpanTreeRequest: - type: object - properties: - attributes_to_return: - type: array - items: - type: string - description: The attributes to return in the tree. - max_depth: - type: integer - description: The maximum depth of the tree. - additionalProperties: false - title: GetSpanTreeRequest - SpanStatus: - type: string - enum: - - ok - - error - title: SpanStatus - description: >- - The status of a span indicating whether it completed successfully or with - an error. 
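For batch scoring of a registered dataset, a `ScoreBatchRequest` and the shape of the corresponding `ScoreBatchResponse` could look like this; the dataset id, function id, rows, and metric names are all illustrative.

```yaml
# ScoreBatchRequest
dataset_id: "eval-dataset-1"             # illustrative dataset identifier
scoring_functions:
  basic::subset_of: null
save_results_dataset: false
---
# ScoreBatchResponse
dataset_id: "eval-dataset-1"
results:
  basic::subset_of:                      # a ScoringResult per scoring function
    score_rows:
      - score: 1.0
    aggregated_results:
      accuracy: 1.0
```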
- SpanWithStatus: - type: object - properties: - span_id: - type: string - description: Unique identifier for the span - trace_id: - type: string - description: >- - Unique identifier for the trace this span belongs to - parent_span_id: - type: string - description: >- - (Optional) Unique identifier for the parent span, if this is a child span - name: - type: string - description: >- - Human-readable name describing the operation this span represents - start_time: - type: string - format: date-time - description: Timestamp when the operation began - end_time: - type: string - format: date-time - description: >- - (Optional) Timestamp when the operation finished, if completed - attributes: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Key-value pairs containing additional metadata about the span - status: - $ref: '#/components/schemas/SpanStatus' - description: >- - (Optional) The current status of the span - additionalProperties: false - required: - - span_id - - trace_id - - name - - start_time - title: SpanWithStatus - description: A span that includes status information. - QuerySpanTreeResponse: - type: object - properties: - data: - type: object - additionalProperties: - $ref: '#/components/schemas/SpanWithStatus' - description: >- - Dictionary mapping span IDs to spans with status information - additionalProperties: false - required: - - data - title: QuerySpanTreeResponse - description: >- - Response containing a tree structure of spans. - Tool: - type: object - properties: - identifier: - type: string - provider_resource_id: - type: string - provider_id: - type: string - type: - type: string - enum: - - model - - shield - - vector_db - - dataset - - scoring_function - - benchmark - - tool - - tool_group - - prompt - const: tool - default: tool - description: Type of resource, always 'tool' - toolgroup_id: - type: string - description: >- - ID of the tool group this tool belongs to - description: - type: string - description: >- - Human-readable description of what the tool does - parameters: - type: array - items: - $ref: '#/components/schemas/ToolParameter' - description: List of parameters this tool accepts - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Additional metadata about the tool - additionalProperties: false - required: - - identifier - - provider_id - - type - - toolgroup_id - - description - - parameters - title: Tool - description: A tool that can be invoked by agents. 
- ToolGroup: - type: object - properties: - identifier: - type: string - provider_resource_id: - type: string - provider_id: - type: string - type: - type: string - enum: - - model - - shield - - vector_db - - dataset - - scoring_function - - benchmark - - tool - - tool_group - - prompt - const: tool_group - default: tool_group - description: Type of resource, always 'tool_group' - mcp_endpoint: - $ref: '#/components/schemas/URL' - description: >- - (Optional) Model Context Protocol endpoint for remote tools - args: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Additional arguments for the tool group - additionalProperties: false - required: - - identifier - - provider_id - - type - title: ToolGroup - description: >- - A group of related tools managed together. - Trace: - type: object - properties: - trace_id: - type: string - description: Unique identifier for the trace - root_span_id: - type: string - description: >- - Unique identifier for the root span that started this trace - start_time: - type: string - format: date-time - description: Timestamp when the trace began - end_time: - type: string - format: date-time - description: >- - (Optional) Timestamp when the trace finished, if completed - additionalProperties: false - required: - - trace_id - - root_span_id - - start_time - title: Trace - description: >- - A trace representing the complete execution path of a request across multiple - operations. - Checkpoint: - type: object - properties: - identifier: - type: string - description: Unique identifier for the checkpoint - created_at: - type: string - format: date-time - description: >- - Timestamp when the checkpoint was created - epoch: - type: integer - description: >- - Training epoch when the checkpoint was saved - post_training_job_id: - type: string - description: >- - Identifier of the training job that created this checkpoint - path: - type: string - description: >- - File system path where the checkpoint is stored - training_metrics: - $ref: '#/components/schemas/PostTrainingMetric' - description: >- - (Optional) Training metrics associated with this checkpoint - additionalProperties: false - required: - - identifier - - created_at - - epoch - - post_training_job_id - - path - title: Checkpoint - description: Checkpoint created during training runs. - PostTrainingJobArtifactsResponse: - type: object - properties: - job_uuid: - type: string - description: Unique identifier for the training job - checkpoints: - type: array - items: - $ref: '#/components/schemas/Checkpoint' - description: >- - List of model checkpoints created during training - additionalProperties: false - required: - - job_uuid - - checkpoints - title: PostTrainingJobArtifactsResponse - description: Artifacts of a finetuning job. - PostTrainingMetric: - type: object - properties: - epoch: - type: integer - description: Training epoch number - train_loss: - type: number - description: Loss value on the training dataset - validation_loss: - type: number - description: Loss value on the validation dataset - perplexity: - type: number - description: >- - Perplexity metric indicating model confidence - additionalProperties: false - required: - - epoch - - train_loss - - validation_loss - - perplexity - title: PostTrainingMetric - description: >- - Training metrics captured during post-training jobs. 
- PostTrainingJobStatusResponse: - type: object - properties: - job_uuid: - type: string - description: Unique identifier for the training job - status: - type: string - enum: - - completed - - in_progress - - failed - - scheduled - - cancelled - description: Current status of the training job - scheduled_at: - type: string - format: date-time - description: >- - (Optional) Timestamp when the job was scheduled - started_at: - type: string - format: date-time - description: >- - (Optional) Timestamp when the job execution began - completed_at: - type: string - format: date-time - description: >- - (Optional) Timestamp when the job finished, if completed - resources_allocated: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Information about computational resources allocated to the - job - checkpoints: - type: array - items: - $ref: '#/components/schemas/Checkpoint' - description: >- - List of model checkpoints created during training - additionalProperties: false - required: - - job_uuid - - status - - checkpoints - title: PostTrainingJobStatusResponse - description: Status of a finetuning job. - ListPostTrainingJobsResponse: - type: object - properties: - data: - type: array - items: - type: object - properties: - job_uuid: - type: string - additionalProperties: false - required: - - job_uuid - title: PostTrainingJob - additionalProperties: false - required: - - data - title: ListPostTrainingJobsResponse - VectorDB: - type: object - properties: - identifier: - type: string - provider_resource_id: - type: string - provider_id: - type: string - type: - type: string - enum: - - model - - shield - - vector_db - - dataset - - scoring_function - - benchmark - - tool - - tool_group - - prompt - const: vector_db - default: vector_db - description: >- - Type of resource, always 'vector_db' for vector databases - embedding_model: - type: string - description: >- - Name of the embedding model to use for vector generation - embedding_dimension: - type: integer - description: Dimension of the embedding vectors - vector_db_name: - type: string - additionalProperties: false - required: - - identifier - - provider_id - - type - - embedding_model - - embedding_dimension - title: VectorDB - description: >- - Vector database resource for storing and querying vector embeddings. - HealthInfo: - type: object - properties: - status: - type: string - enum: - - OK - - Error - - Not Implemented - description: Current health status of the service - additionalProperties: false - required: - - status - title: HealthInfo - description: >- - Health status information for the service. - RAGDocument: - type: object - properties: - document_id: - type: string - description: The unique identifier for the document. - content: - oneOf: - - type: string - - $ref: '#/components/schemas/InterleavedContentItem' - - type: array - items: - $ref: '#/components/schemas/InterleavedContentItem' - - $ref: '#/components/schemas/URL' - description: The content of the document. - mime_type: - type: string - description: The MIME type of the document. - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: Additional metadata for the document. 
- additionalProperties: false - required: - - document_id - - content - - metadata - title: RAGDocument - description: >- - A document to be used for document ingestion in the RAG Tool. - InsertRequest: - type: object - properties: - documents: - type: array - items: - $ref: '#/components/schemas/RAGDocument' - description: >- - List of documents to index in the RAG system - vector_db_id: - type: string - description: >- - ID of the vector database to store the document embeddings - chunk_size_in_tokens: - type: integer - description: >- - (Optional) Size in tokens for document chunking during indexing - additionalProperties: false - required: - - documents - - vector_db_id - - chunk_size_in_tokens - title: InsertRequest - Chunk: - type: object - properties: - content: - $ref: '#/components/schemas/InterleavedContent' - description: >- - The content of the chunk, which can be interleaved text, images, or other - types. - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - Metadata associated with the chunk that will be used in the model context - during inference. - embedding: - type: array - items: - type: number - description: >- - Optional embedding for the chunk. If not provided, it will be computed - later. - stored_chunk_id: - type: string - description: >- - The chunk ID that is stored in the vector database. Used for backend functionality. - chunk_metadata: - $ref: '#/components/schemas/ChunkMetadata' - description: >- - Metadata for the chunk that will NOT be used in the context during inference. - The `chunk_metadata` is required backend functionality. - additionalProperties: false - required: - - content - - metadata - title: Chunk - description: >- - A chunk of content that can be inserted into a vector database. - ChunkMetadata: - type: object - properties: - chunk_id: - type: string - description: >- - The ID of the chunk. If not set, it will be generated based on the document - ID and content. - document_id: - type: string - description: >- - The ID of the document this chunk belongs to. - source: - type: string - description: >- - The source of the content, such as a URL, file path, or other identifier. - created_timestamp: - type: integer - description: >- - An optional timestamp indicating when the chunk was created. - updated_timestamp: - type: integer - description: >- - An optional timestamp indicating when the chunk was last updated. - chunk_window: - type: string - description: >- - The window of the chunk, which can be used to group related chunks together. - chunk_tokenizer: - type: string - description: >- - The tokenizer used to create the chunk. Default is Tiktoken. - chunk_embedding_model: - type: string - description: >- - The embedding model used to create the chunk's embedding. - chunk_embedding_dimension: - type: integer - description: >- - The dimension of the embedding vector for the chunk. - content_token_count: - type: integer - description: >- - The number of tokens in the content of the chunk. - metadata_token_count: - type: integer - description: >- - The number of tokens in the metadata of the chunk. - additionalProperties: false - title: ChunkMetadata - description: >- - `ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional - information about the chunk that will not be used in the context during - inference, but is required for backend functionality. 
The `ChunkMetadata` is - set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not - expected to change after. Use `Chunk.metadata` for metadata that will - be used in the context during inference. - InsertChunksRequest: - type: object - properties: - vector_db_id: - type: string - description: >- - The identifier of the vector database to insert the chunks into. - chunks: - type: array - items: - $ref: '#/components/schemas/Chunk' - description: >- - The chunks to insert. Each `Chunk` should contain content which can be - interleaved text, images, or other types. `metadata`: `dict[str, Any]` - and `embedding`: `List[float]` are optional. If `metadata` is provided, - you configure how Llama Stack formats the chunk during generation. If - `embedding` is not provided, it will be computed later. - ttl_seconds: - type: integer - description: The time to live of the chunks. - additionalProperties: false - required: - - vector_db_id - - chunks - title: InsertChunksRequest - ProviderInfo: - type: object - properties: - api: - type: string - description: The API name this provider implements - provider_id: - type: string - description: Unique identifier for the provider - provider_type: - type: string - description: The type of provider implementation - config: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - Configuration parameters for the provider - health: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: Current health status of the provider - additionalProperties: false - required: - - api - - provider_id - - provider_type - - config - - health - title: ProviderInfo - description: >- - Information about a registered provider including its configuration and health - status. - InvokeToolRequest: - type: object - properties: - tool_name: - type: string - description: The name of the tool to invoke. - kwargs: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - A dictionary of arguments to pass to the tool. - additionalProperties: false - required: - - tool_name - - kwargs - title: InvokeToolRequest - ToolInvocationResult: - type: object - properties: - content: - $ref: '#/components/schemas/InterleavedContent' - description: >- - (Optional) The output content from the tool execution - error_message: - type: string - description: >- - (Optional) Error message if the tool execution failed - error_code: - type: integer - description: >- - (Optional) Numeric error code if the tool execution failed - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Additional metadata about the tool execution - additionalProperties: false - title: ToolInvocationResult - description: Result of a tool invocation. 
- PaginatedResponse: - type: object - properties: - data: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The list of items for the current page - has_more: - type: boolean - description: >- - Whether there are more items available after this set - url: - type: string - description: The URL for accessing this list - additionalProperties: false - required: - - data - - has_more - title: PaginatedResponse - description: >- - A generic paginated response that follows a simple format. - Job: - type: object - properties: - job_id: - type: string - description: Unique identifier for the job - status: - type: string - enum: - - completed - - in_progress - - failed - - scheduled - - cancelled - description: Current execution status of the job - additionalProperties: false - required: - - job_id - - status - title: Job - description: >- - A job execution instance with status tracking. - ListBenchmarksResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Benchmark' - additionalProperties: false - required: - - data - title: ListBenchmarksResponse - Order: - type: string - enum: - - asc - - desc - title: Order - description: Sort order for paginated responses. - ListOpenAIChatCompletionResponse: - type: object - properties: - data: - type: array - items: - type: object - properties: - id: - type: string - description: The ID of the chat completion - choices: - type: array - items: - $ref: '#/components/schemas/OpenAIChoice' - description: List of choices - object: - type: string - const: chat.completion - default: chat.completion - description: >- - The object type, which will be "chat.completion" - created: - type: integer - description: >- - The Unix timestamp in seconds when the chat completion was created - model: - type: string - description: >- - The model that was used to generate the chat completion - input_messages: - type: array - items: - $ref: '#/components/schemas/OpenAIMessageParam' - additionalProperties: false - required: - - id - - choices - - object - - created - - model - - input_messages - title: OpenAICompletionWithInputMessages - description: >- - List of chat completion objects with their input messages - has_more: - type: boolean - description: >- - Whether there are more completions available beyond this list - first_id: - type: string - description: ID of the first completion in this list - last_id: - type: string - description: ID of the last completion in this list - object: - type: string - const: list - default: list - description: >- - Must be "list" to identify this as a list response - additionalProperties: false - required: - - data - - has_more - - first_id - - last_id - - object - title: ListOpenAIChatCompletionResponse - description: >- - Response from listing OpenAI-compatible chat completions. - ListDatasetsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Dataset' - description: List of datasets - additionalProperties: false - required: - - data - title: ListDatasetsResponse - description: Response from listing datasets. 
- ListModelsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Model' - additionalProperties: false - required: - - data - title: ListModelsResponse - ListOpenAIResponseInputItem: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIResponseInput' - description: List of input items - object: - type: string - const: list - default: list - description: Object type identifier, always "list" - additionalProperties: false - required: - - data - - object - title: ListOpenAIResponseInputItem - description: >- - List container for OpenAI response input items. - ListOpenAIResponseObject: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIResponseObjectWithInput' - description: >- - List of response objects with their input context - has_more: - type: boolean - description: >- - Whether there are more results available beyond this page - first_id: - type: string - description: >- - Identifier of the first item in this page - last_id: - type: string - description: Identifier of the last item in this page - object: - type: string - const: list - default: list - description: Object type identifier, always "list" - additionalProperties: false - required: - - data - - has_more - - first_id - - last_id - - object - title: ListOpenAIResponseObject - description: >- - Paginated list of OpenAI response objects with navigation metadata. - OpenAIResponseObjectWithInput: - type: object - properties: - created_at: - type: integer - description: >- - Unix timestamp when the response was created - error: - $ref: '#/components/schemas/OpenAIResponseError' - description: >- - (Optional) Error details if the response generation failed - id: - type: string - description: Unique identifier for this response - model: - type: string - description: Model identifier used for generation - object: - type: string - const: response - default: response - description: >- - Object type identifier, always "response" - output: - type: array - items: - $ref: '#/components/schemas/OpenAIResponseOutput' - description: >- - List of generated output items (messages, tool calls, etc.) - parallel_tool_calls: - type: boolean - default: false - description: >- - Whether tool calls can be executed in parallel - previous_response_id: - type: string - description: >- - (Optional) ID of the previous response in a conversation - status: - type: string - description: >- - Current status of the response generation - temperature: - type: number - description: >- - (Optional) Sampling temperature used for generation - text: - $ref: '#/components/schemas/OpenAIResponseText' - description: >- - Text formatting configuration for the response - top_p: - type: number - description: >- - (Optional) Nucleus sampling parameter used for generation - truncation: - type: string - description: >- - (Optional) Truncation strategy applied to the response - input: - type: array - items: - $ref: '#/components/schemas/OpenAIResponseInput' - description: >- - List of input items that led to this response - additionalProperties: false - required: - - created_at - - id - - model - - object - - output - - parallel_tool_calls - - status - - text - - input - title: OpenAIResponseObjectWithInput - description: >- - OpenAI response object extended with input context information. 
- ListPromptsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Prompt' - additionalProperties: false - required: - - data - title: ListPromptsResponse - description: Response model to list prompts. - ListProvidersResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/ProviderInfo' - description: List of provider information objects - additionalProperties: false - required: - - data - title: ListProvidersResponse - description: >- - Response containing a list of all available providers. - RouteInfo: - type: object - properties: - route: - type: string - description: The API endpoint path - method: - type: string - description: HTTP method for the route - provider_types: - type: array - items: - type: string - description: >- - List of provider types that implement this route - additionalProperties: false - required: - - route - - method - - provider_types - title: RouteInfo - description: >- - Information about an API route including its path, method, and implementing - providers. - ListRoutesResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/RouteInfo' - description: >- - List of available route information objects - additionalProperties: false - required: - - data - title: ListRoutesResponse - description: >- - Response containing a list of all available API routes. - ListToolDefsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/ToolDef' - description: List of tool definitions - additionalProperties: false - required: - - data - title: ListToolDefsResponse - description: >- - Response containing a list of tool definitions. - ListScoringFunctionsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/ScoringFn' - additionalProperties: false - required: - - data - title: ListScoringFunctionsResponse ListShieldsResponse: type: object properties: @@ -10640,46 +7139,102 @@ components: required: - data title: ListShieldsResponse - ListToolGroupsResponse: + RegisterShieldRequest: type: object properties: - data: - type: array - items: - $ref: '#/components/schemas/ToolGroup' - description: List of tool groups + shield_id: + type: string + description: >- + The identifier of the shield to register. + provider_shield_id: + type: string + description: >- + The identifier of the shield in the provider. + provider_id: + type: string + description: The identifier of the provider. + params: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: The parameters of the shield. additionalProperties: false required: - - data - title: ListToolGroupsResponse + - shield_id + title: RegisterShieldRequest + SyntheticDataGenerateRequest: + type: object + properties: + dialogs: + type: array + items: + $ref: '#/components/schemas/Message' + description: >- + List of conversation messages to use as input for synthetic data generation + filtering_function: + type: string + enum: + - none + - random + - top_k + - top_p + - top_k_top_p + - sigmoid + description: >- + Type of filtering to apply to generated synthetic data samples + model: + type: string + description: >- + (Optional) The identifier of the model to use. 
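An illustrative `RegisterShieldRequest` body (only `shield_id` is required; the provider identifiers and parameters shown here are hypothetical):

```yaml
shield_id: "content-safety"              # identifier to register the shield under
provider_shield_id: "llama-guard-3"      # provider-side identifier (illustrative)
provider_id: "inline::llama-guard"       # hypothetical provider id
params:
  excluded_categories: []                # provider-specific parameters
```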
The model must be registered + with Llama Stack and available via the /models endpoint + additionalProperties: false + required: + - dialogs + - filtering_function + title: SyntheticDataGenerateRequest + SyntheticDataGenerationResponse: + type: object + properties: + synthetic_data: + type: array + items: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + List of generated synthetic data samples that passed the filtering criteria + statistics: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Statistical information about the generation process and filtering + results + additionalProperties: false + required: + - synthetic_data + title: SyntheticDataGenerationResponse description: >- - Response containing a list of tool groups. - ListToolsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Tool' - description: List of tools - additionalProperties: false - required: - - data - title: ListToolsResponse - description: Response containing a list of tools. - ListVectorDBsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/VectorDB' - description: List of vector databases - additionalProperties: false - required: - - data - title: ListVectorDBsResponse - description: Response from listing vector databases. + Response from the synthetic data generation. Batch of (prompt, response, score) + tuples that pass the threshold. Event: oneOf: - $ref: '#/components/schemas/UnstructuredLogEvent' @@ -10808,6 +7363,15 @@ components: - name title: SpanStartPayload description: Payload for a span start event. + SpanStatus: + type: string + enum: + - ok + - error + title: SpanStatus + description: >- + The status of a span indicating whether it completed successfully or with + an error. StructuredLogEvent: type: object properties: @@ -10932,78 +7496,13 @@ components: - event - ttl_seconds title: LogEventRequest - VectorStoreChunkingStrategy: - oneOf: - - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' - - $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic' - discriminator: - propertyName: type - mapping: - auto: '#/components/schemas/VectorStoreChunkingStrategyAuto' - static: '#/components/schemas/VectorStoreChunkingStrategyStatic' - VectorStoreChunkingStrategyAuto: + InvokeToolRequest: type: object properties: - type: + tool_name: type: string - const: auto - default: auto - description: >- - Strategy type, always "auto" for automatic chunking - additionalProperties: false - required: - - type - title: VectorStoreChunkingStrategyAuto - description: >- - Automatic chunking strategy for vector store files. - VectorStoreChunkingStrategyStatic: - type: object - properties: - type: - type: string - const: static - default: static - description: >- - Strategy type, always "static" for static chunking - static: - $ref: '#/components/schemas/VectorStoreChunkingStrategyStaticConfig' - description: >- - Configuration parameters for the static chunking strategy - additionalProperties: false - required: - - type - - static - title: VectorStoreChunkingStrategyStatic - description: >- - Static chunking strategy with configurable parameters. 
- VectorStoreChunkingStrategyStaticConfig: - type: object - properties: - chunk_overlap_tokens: - type: integer - default: 400 - description: >- - Number of tokens to overlap between adjacent chunks - max_chunk_size_tokens: - type: integer - default: 800 - description: >- - Maximum number of tokens per chunk, must be between 100 and 4096 - additionalProperties: false - required: - - chunk_overlap_tokens - - max_chunk_size_tokens - title: VectorStoreChunkingStrategyStaticConfig - description: >- - Configuration for static chunking strategy. - OpenaiAttachFileToVectorStoreRequest: - type: object - properties: - file_id: - type: string - description: >- - The ID of the file to attach to the vector store. - attributes: + description: The name of the tool to invoke. + kwargs: type: object additionalProperties: oneOf: @@ -11014,49 +7513,28 @@ components: - type: array - type: object description: >- - The key-value attributes stored with the file, which can be used for filtering. - chunking_strategy: - $ref: '#/components/schemas/VectorStoreChunkingStrategy' - description: >- - The chunking strategy to use for the file. + A dictionary of arguments to pass to the tool. additionalProperties: false required: - - file_id - title: OpenaiAttachFileToVectorStoreRequest - VectorStoreFileLastError: + - tool_name + - kwargs + title: InvokeToolRequest + ToolInvocationResult: type: object properties: - code: - oneOf: - - type: string - const: server_error - - type: string - const: rate_limit_exceeded + content: + $ref: '#/components/schemas/InterleavedContent' description: >- - Error code indicating the type of failure - message: + (Optional) The output content from the tool execution + error_message: type: string description: >- - Human-readable error message describing the failure - additionalProperties: false - required: - - code - - message - title: VectorStoreFileLastError - description: >- - Error information for failed vector store file processing. 
- VectorStoreFileObject: - type: object - properties: - id: - type: string - description: Unique identifier for the file - object: - type: string - default: vector_store.file + (Optional) Error message if the tool execution failed + error_code: + type: integer description: >- - Object type identifier, always "vector_store.file" - attributes: + (Optional) Numeric error code if the tool execution failed + metadata: type: object additionalProperties: oneOf: @@ -11067,142 +7545,27 @@ components: - type: array - type: object description: >- - Key-value attributes associated with the file - chunking_strategy: - oneOf: - - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' - - $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic' - discriminator: - propertyName: type - mapping: - auto: '#/components/schemas/VectorStoreChunkingStrategyAuto' - static: '#/components/schemas/VectorStoreChunkingStrategyStatic' - description: >- - Strategy used for splitting the file into chunks - created_at: - type: integer - description: >- - Timestamp when the file was added to the vector store - last_error: - $ref: '#/components/schemas/VectorStoreFileLastError' - description: >- - (Optional) Error information if file processing failed - status: - $ref: '#/components/schemas/VectorStoreFileStatus' - description: Current processing status of the file - usage_bytes: - type: integer - default: 0 - description: Storage space used by this file in bytes - vector_store_id: - type: string - description: >- - ID of the vector store containing this file + (Optional) Additional metadata about the tool execution additionalProperties: false - required: - - id - - object - - attributes - - chunking_strategy - - created_at - - status - - usage_bytes - - vector_store_id - title: VectorStoreFileObject - description: OpenAI Vector Store File object. - VectorStoreFileStatus: - oneOf: - - type: string - const: completed - - type: string - const: in_progress - - type: string - const: cancelled - - type: string - const: failed - VectorStoreFileBatchObject: - type: object - properties: - id: - type: string - description: Unique identifier for the file batch - object: - type: string - default: vector_store.file_batch - description: >- - Object type identifier, always "vector_store.file_batch" - created_at: - type: integer - description: >- - Timestamp when the file batch was created - vector_store_id: - type: string - description: >- - ID of the vector store containing the file batch - status: - $ref: '#/components/schemas/VectorStoreFileStatus' - description: >- - Current processing status of the file batch - file_counts: - $ref: '#/components/schemas/VectorStoreFileCounts' - description: >- - File processing status counts for the batch - additionalProperties: false - required: - - id - - object - - created_at - - vector_store_id - - status - - file_counts - title: VectorStoreFileBatchObject - description: OpenAI Vector Store File Batch object. 
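For the tool runtime, an `InvokeToolRequest` and a successful `ToolInvocationResult` might look like the following; the tool name, arguments, and output are placeholders, and the plain-string `content` again assumes the simplest `InterleavedContent` form.

```yaml
# InvokeToolRequest
tool_name: "web_search"                  # illustrative tool name
kwargs:
  query: "llama stack conformance testing"
---
# ToolInvocationResult for a successful call
content: "Top results: ..."              # InterleavedContent shown as a plain string
metadata:
  num_results: 3                         # illustrative metadata
```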
- VectorStoreFileCounts: - type: object - properties: - completed: - type: integer - description: >- - Number of files that have been successfully processed - cancelled: - type: integer - description: >- - Number of files that had their processing cancelled - failed: - type: integer - description: Number of files that failed to process - in_progress: - type: integer - description: >- - Number of files currently being processed - total: - type: integer - description: >- - Total number of files in the vector store - additionalProperties: false - required: - - completed - - cancelled - - failed - - in_progress - - total - title: VectorStoreFileCounts - description: >- - File processing status counts for a vector store. - OpenAIJSONSchema: + title: ToolInvocationResult + description: Result of a tool invocation. + ToolDef: type: object properties: name: type: string - description: Name of the schema + description: Name of the tool description: type: string - description: (Optional) Description of the schema - strict: - type: boolean description: >- - (Optional) Whether to enforce strict adherence to the schema - schema: + (Optional) Human-readable description of what the tool does + parameters: + type: array + items: + $ref: '#/components/schemas/ToolParameter' + description: >- + (Optional) List of parameters this tool accepts + metadata: type: object additionalProperties: oneOf: @@ -11212,519 +7575,90 @@ components: - type: string - type: array - type: object - description: (Optional) The JSON schema definition + description: >- + (Optional) Additional metadata about the tool additionalProperties: false required: - name - title: OpenAIJSONSchema + title: ToolDef description: >- - JSON schema specification for OpenAI-compatible structured response format. - OpenAIResponseFormatJSONObject: + Tool definition used in runtime contexts. + ToolParameter: type: object properties: - type: + name: type: string - const: json_object - default: json_object - description: >- - Must be "json_object" to indicate generic JSON object response format - additionalProperties: false - required: - - type - title: OpenAIResponseFormatJSONObject - description: >- - JSON object response format for OpenAI-compatible chat completion requests. - OpenAIResponseFormatJSONSchema: - type: object - properties: - type: - type: string - const: json_schema - default: json_schema - description: >- - Must be "json_schema" to indicate structured JSON response format - json_schema: - $ref: '#/components/schemas/OpenAIJSONSchema' - description: >- - The JSON schema specification for the response - additionalProperties: false - required: - - type - - json_schema - title: OpenAIResponseFormatJSONSchema - description: >- - JSON schema response format for OpenAI-compatible chat completion requests. 
- OpenAIResponseFormatParam: - oneOf: - - $ref: '#/components/schemas/OpenAIResponseFormatText' - - $ref: '#/components/schemas/OpenAIResponseFormatJSONSchema' - - $ref: '#/components/schemas/OpenAIResponseFormatJSONObject' - discriminator: - propertyName: type - mapping: - text: '#/components/schemas/OpenAIResponseFormatText' - json_schema: '#/components/schemas/OpenAIResponseFormatJSONSchema' - json_object: '#/components/schemas/OpenAIResponseFormatJSONObject' - OpenAIResponseFormatText: - type: object - properties: - type: - type: string - const: text - default: text - description: >- - Must be "text" to indicate plain text response format - additionalProperties: false - required: - - type - title: OpenAIResponseFormatText - description: >- - Text response format for OpenAI-compatible chat completion requests. - OpenaiChatCompletionRequest: - type: object - properties: - model: + description: Name of the parameter + parameter_type: type: string description: >- - The identifier of the model to use. The model must be registered with - Llama Stack and available via the /models endpoint. - messages: - type: array - items: - $ref: '#/components/schemas/OpenAIMessageParam' - description: List of messages in the conversation. - frequency_penalty: - type: number + Type of the parameter (e.g., string, integer) + description: + type: string description: >- - (Optional) The penalty for repeated tokens. - function_call: - oneOf: - - type: string - - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: (Optional) The function call to use. - functions: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: (Optional) List of functions to use. - logit_bias: + Human-readable description of what the parameter does + required: + type: boolean + default: true + description: >- + Whether this parameter is required for tool invocation + items: type: object - additionalProperties: - type: number - description: (Optional) The logit bias to use. - logprobs: - type: boolean - description: (Optional) The log probabilities to use. - max_completion_tokens: - type: integer description: >- - (Optional) The maximum number of tokens to generate. - max_tokens: - type: integer - description: >- - (Optional) The maximum number of tokens to generate. - n: - type: integer - description: >- - (Optional) The number of completions to generate. - parallel_tool_calls: - type: boolean - description: >- - (Optional) Whether to parallelize tool calls. - presence_penalty: - type: number - description: >- - (Optional) The penalty for repeated tokens. - response_format: - $ref: '#/components/schemas/OpenAIResponseFormatParam' - description: (Optional) The response format to use. - seed: - type: integer - description: (Optional) The seed to use. - stop: + Type of the elements when parameter_type is array + title: + type: string + description: (Optional) Title of the parameter + default: oneOf: + - type: 'null' + - type: boolean + - type: number - type: string - type: array - items: - type: string - description: (Optional) The stop tokens to use. - stream: - type: boolean - description: >- - (Optional) Whether to stream the response. 
- stream_options: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: (Optional) The stream options to use. - temperature: - type: number - description: (Optional) The temperature to use. - tool_choice: - oneOf: - - type: string - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: (Optional) The tool choice to use. - tools: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: (Optional) The tools to use. - top_logprobs: - type: integer description: >- - (Optional) The top log probabilities to use. - top_p: - type: number - description: (Optional) The top p to use. - user: - type: string - description: (Optional) The user to use. + (Optional) Default value for the parameter if not provided additionalProperties: false required: - - model - - messages - title: OpenaiChatCompletionRequest - OpenAIChatCompletion: + - name + - parameter_type + - description + - required + title: ToolParameter + description: Parameter definition for a tool. + ListToolDefsResponse: type: object properties: - id: - type: string - description: The ID of the chat completion - choices: + data: type: array items: - $ref: '#/components/schemas/OpenAIChoice' - description: List of choices - object: - type: string - const: chat.completion - default: chat.completion - description: >- - The object type, which will be "chat.completion" - created: - type: integer - description: >- - The Unix timestamp in seconds when the chat completion was created - model: - type: string - description: >- - The model that was used to generate the chat completion + $ref: '#/components/schemas/ToolDef' + description: List of tool definitions additionalProperties: false required: - - id - - choices - - object - - created - - model - title: OpenAIChatCompletion + - data + title: ListToolDefsResponse description: >- - Response from an OpenAI-compatible chat completion request. - OpenAIChatCompletionChunk: + Response containing a list of tool definitions. + RAGDocument: type: object properties: - id: + document_id: type: string - description: The ID of the chat completion - choices: - type: array - items: - $ref: '#/components/schemas/OpenAIChunkChoice' - description: List of choices - object: - type: string - const: chat.completion.chunk - default: chat.completion.chunk - description: >- - The object type, which will be "chat.completion.chunk" - created: - type: integer - description: >- - The Unix timestamp in seconds when the chat completion was created - model: - type: string - description: >- - The model that was used to generate the chat completion - additionalProperties: false - required: - - id - - choices - - object - - created - - model - title: OpenAIChatCompletionChunk - description: >- - Chunk from a streaming response to an OpenAI-compatible chat completion request. - OpenAIChoiceDelta: - type: object - properties: + description: The unique identifier for the document. 
content: - type: string - description: (Optional) The content of the delta - refusal: - type: string - description: (Optional) The refusal of the delta - role: - type: string - description: (Optional) The role of the delta - tool_calls: - type: array - items: - $ref: '#/components/schemas/OpenAIChatCompletionToolCall' - description: (Optional) The tool calls of the delta - additionalProperties: false - title: OpenAIChoiceDelta - description: >- - A delta from an OpenAI-compatible chat completion streaming response. - OpenAIChunkChoice: - type: object - properties: - delta: - $ref: '#/components/schemas/OpenAIChoiceDelta' - description: The delta from the chunk - finish_reason: - type: string - description: The reason the model stopped generating - index: - type: integer - description: The index of the choice - logprobs: - $ref: '#/components/schemas/OpenAIChoiceLogprobs' - description: >- - (Optional) The log probabilities for the tokens in the message - additionalProperties: false - required: - - delta - - finish_reason - - index - title: OpenAIChunkChoice - description: >- - A chunk choice from an OpenAI-compatible chat completion streaming response. - OpenaiCompletionRequest: - type: object - properties: - model: - type: string - description: >- - The identifier of the model to use. The model must be registered with - Llama Stack and available via the /models endpoint. - prompt: oneOf: - type: string + - $ref: '#/components/schemas/InterleavedContentItem' - type: array items: - type: string - - type: array - items: - type: integer - - type: array - items: - type: array - items: - type: integer - description: The prompt to generate a completion for. - best_of: - type: integer - description: >- - (Optional) The number of completions to generate. - echo: - type: boolean - description: (Optional) Whether to echo the prompt. - frequency_penalty: - type: number - description: >- - (Optional) The penalty for repeated tokens. - logit_bias: - type: object - additionalProperties: - type: number - description: (Optional) The logit bias to use. - logprobs: - type: boolean - description: (Optional) The log probabilities to use. - max_tokens: - type: integer - description: >- - (Optional) The maximum number of tokens to generate. - n: - type: integer - description: >- - (Optional) The number of completions to generate. - presence_penalty: - type: number - description: >- - (Optional) The penalty for repeated tokens. - seed: - type: integer - description: (Optional) The seed to use. - stop: - oneOf: - - type: string - - type: array - items: - type: string - description: (Optional) The stop tokens to use. - stream: - type: boolean - description: >- - (Optional) Whether to stream the response. - stream_options: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: (Optional) The stream options to use. - temperature: - type: number - description: (Optional) The temperature to use. - top_p: - type: number - description: (Optional) The top p to use. - user: + $ref: '#/components/schemas/InterleavedContentItem' + - $ref: '#/components/schemas/URL' + description: The content of the document. + mime_type: type: string - description: (Optional) The user to use. - guided_choice: - type: array - items: - type: string - prompt_logprobs: - type: integer - suffix: - type: string - description: >- - (Optional) The suffix that should be appended to the completion. 
- additionalProperties: false - required: - - model - - prompt - title: OpenaiCompletionRequest - OpenAICompletion: - type: object - properties: - id: - type: string - choices: - type: array - items: - $ref: '#/components/schemas/OpenAICompletionChoice' - created: - type: integer - model: - type: string - object: - type: string - const: text_completion - default: text_completion - additionalProperties: false - required: - - id - - choices - - created - - model - - object - title: OpenAICompletion - description: >- - Response from an OpenAI-compatible completion request. - OpenAICompletionChoice: - type: object - properties: - finish_reason: - type: string - text: - type: string - index: - type: integer - logprobs: - $ref: '#/components/schemas/OpenAIChoiceLogprobs' - additionalProperties: false - required: - - finish_reason - - text - - index - title: OpenAICompletionChoice - description: >- - A choice from an OpenAI-compatible completion response. - OpenaiCreateVectorStoreRequest: - type: object - properties: - name: - type: string - description: A name for the vector store. - file_ids: - type: array - items: - type: string - description: >- - A list of File IDs that the vector store should use. Useful for tools - like `file_search` that can access files. - expires_after: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The expiration policy for a vector store. - chunking_strategy: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The chunking strategy used to chunk the file(s). If not set, will use - the `auto` strategy. + description: The MIME type of the document. metadata: type: object additionalProperties: @@ -11735,983 +7669,38 @@ components: - type: string - type: array - type: object - description: >- - Set of 16 key-value pairs that can be attached to an object. - embedding_model: - type: string - description: >- - The embedding model to use for this vector store. - embedding_dimension: - type: integer - description: >- - The dimension of the embedding vectors (default: 384). - provider_id: - type: string - description: >- - The ID of the provider to use for this vector store. 
- additionalProperties: false - title: OpenaiCreateVectorStoreRequest - VectorStoreObject: - type: object - properties: - id: - type: string - description: Unique identifier for the vector store - object: - type: string - default: vector_store - description: >- - Object type identifier, always "vector_store" - created_at: - type: integer - description: >- - Timestamp when the vector store was created - name: - type: string - description: (Optional) Name of the vector store - usage_bytes: - type: integer - default: 0 - description: >- - Storage space used by the vector store in bytes - file_counts: - $ref: '#/components/schemas/VectorStoreFileCounts' - description: >- - File processing status counts for the vector store - status: - type: string - default: completed - description: Current status of the vector store - expires_after: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Expiration policy for the vector store - expires_at: - type: integer - description: >- - (Optional) Timestamp when the vector store will expire - last_active_at: - type: integer - description: >- - (Optional) Timestamp of last activity on the vector store - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - Set of key-value pairs that can be attached to the vector store + description: Additional metadata for the document. additionalProperties: false required: - - id - - object - - created_at - - usage_bytes - - file_counts - - status + - document_id + - content - metadata - title: VectorStoreObject - description: OpenAI Vector Store object. - OpenaiCreateVectorStoreFileBatchRequest: + title: RAGDocument + description: >- + A document to be used for document ingestion in the RAG Tool. + InsertRequest: type: object properties: - file_ids: + documents: type: array items: - type: string + $ref: '#/components/schemas/RAGDocument' description: >- - A list of File IDs that the vector store should use. - attributes: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Key-value attributes to store with the files. - chunking_strategy: - $ref: '#/components/schemas/VectorStoreChunkingStrategy' - description: >- - (Optional) The chunking strategy used to chunk the file(s). Defaults to - auto. - additionalProperties: false - required: - - file_ids - title: OpenaiCreateVectorStoreFileBatchRequest - OpenAIFileDeleteResponse: - type: object - properties: - id: - type: string - description: The file identifier that was deleted - object: - type: string - const: file - default: file - description: The object type, which is always "file" - deleted: - type: boolean - description: >- - Whether the file was successfully deleted - additionalProperties: false - required: - - id - - object - - deleted - title: OpenAIFileDeleteResponse - description: >- - Response for deleting a file in OpenAI Files API. 
- VectorStoreDeleteResponse: - type: object - properties: - id: + List of documents to index in the RAG system + vector_db_id: type: string description: >- - Unique identifier of the deleted vector store - object: - type: string - default: vector_store.deleted - description: >- - Object type identifier for the deletion response - deleted: - type: boolean - default: true - description: >- - Whether the deletion operation was successful - additionalProperties: false - required: - - id - - object - - deleted - title: VectorStoreDeleteResponse - description: Response from deleting a vector store. - VectorStoreFileDeleteResponse: - type: object - properties: - id: - type: string - description: Unique identifier of the deleted file - object: - type: string - default: vector_store.file.deleted - description: >- - Object type identifier for the deletion response - deleted: - type: boolean - default: true - description: >- - Whether the deletion operation was successful - additionalProperties: false - required: - - id - - object - - deleted - title: VectorStoreFileDeleteResponse - description: >- - Response from deleting a vector store file. - OpenaiEmbeddingsRequest: - type: object - properties: - model: - type: string - description: >- - The identifier of the model to use. The model must be an embedding model - registered with Llama Stack and available via the /models endpoint. - input: - oneOf: - - type: string - - type: array - items: - type: string - description: >- - Input text to embed, encoded as a string or array of strings. To embed - multiple inputs in a single request, pass an array of strings. - encoding_format: - type: string - description: >- - (Optional) The format to return the embeddings in. Can be either "float" - or "base64". Defaults to "float". - dimensions: + ID of the vector database to store the document embeddings + chunk_size_in_tokens: type: integer description: >- - (Optional) The number of dimensions the resulting output embeddings should - have. Only supported in text-embedding-3 and later models. - user: - type: string - description: >- - (Optional) A unique identifier representing your end-user, which can help - OpenAI to monitor and detect abuse. + (Optional) Size in tokens for document chunking during indexing additionalProperties: false required: - - model - - input - title: OpenaiEmbeddingsRequest - OpenAIEmbeddingData: - type: object - properties: - object: - type: string - const: embedding - default: embedding - description: >- - The object type, which will be "embedding" - embedding: - oneOf: - - type: array - items: - type: number - - type: string - description: >- - The embedding vector as a list of floats (when encoding_format="float") - or as a base64-encoded string (when encoding_format="base64") - index: - type: integer - description: >- - The index of the embedding in the input list - additionalProperties: false - required: - - object - - embedding - - index - title: OpenAIEmbeddingData - description: >- - A single embedding data object from an OpenAI-compatible embeddings response. - OpenAIEmbeddingUsage: - type: object - properties: - prompt_tokens: - type: integer - description: The number of tokens in the input - total_tokens: - type: integer - description: The total number of tokens used - additionalProperties: false - required: - - prompt_tokens - - total_tokens - title: OpenAIEmbeddingUsage - description: >- - Usage information for an OpenAI-compatible embeddings response. 
- OpenAIEmbeddingsResponse: - type: object - properties: - object: - type: string - const: list - default: list - description: The object type, which will be "list" - data: - type: array - items: - $ref: '#/components/schemas/OpenAIEmbeddingData' - description: List of embedding data objects - model: - type: string - description: >- - The model that was used to generate the embeddings - usage: - $ref: '#/components/schemas/OpenAIEmbeddingUsage' - description: Usage information - additionalProperties: false - required: - - object - - data - - model - - usage - title: OpenAIEmbeddingsResponse - description: >- - Response from an OpenAI-compatible embeddings request. - OpenAIFilePurpose: - type: string - enum: - - assistants - - batch - title: OpenAIFilePurpose - description: >- - Valid purpose values for OpenAI Files API. - ListOpenAIFileResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/OpenAIFileObject' - description: List of file objects - has_more: - type: boolean - description: >- - Whether there are more files available beyond this page - first_id: - type: string - description: >- - ID of the first file in the list for pagination - last_id: - type: string - description: >- - ID of the last file in the list for pagination - object: - type: string - const: list - default: list - description: The object type, which is always "list" - additionalProperties: false - required: - - data - - has_more - - first_id - - last_id - - object - title: ListOpenAIFileResponse - description: >- - Response for listing files in OpenAI Files API. - OpenAIFileObject: - type: object - properties: - object: - type: string - const: file - default: file - description: The object type, which is always "file" - id: - type: string - description: >- - The file identifier, which can be referenced in the API endpoints - bytes: - type: integer - description: The size of the file, in bytes - created_at: - type: integer - description: >- - The Unix timestamp (in seconds) for when the file was created - expires_at: - type: integer - description: >- - The Unix timestamp (in seconds) for when the file expires - filename: - type: string - description: The name of the file - purpose: - type: string - enum: - - assistants - - batch - description: The intended purpose of the file - additionalProperties: false - required: - - object - - id - - bytes - - created_at - - expires_at - - filename - - purpose - title: OpenAIFileObject - description: >- - OpenAI File object as defined in the OpenAI Files API. - VectorStoreListFilesResponse: - type: object - properties: - object: - type: string - default: list - description: Object type identifier, always "list" - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreFileObject' - description: List of vector store file objects - first_id: - type: string - description: >- - (Optional) ID of the first file in the list for pagination - last_id: - type: string - description: >- - (Optional) ID of the last file in the list for pagination - has_more: - type: boolean - default: false - description: >- - Whether there are more files available beyond this page - additionalProperties: false - required: - - object - - data - - has_more - title: VectorStoreListFilesResponse - description: >- - Response from listing files in a vector store. 
- VectorStoreFilesListInBatchResponse: - type: object - properties: - object: - type: string - default: list - description: Object type identifier, always "list" - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreFileObject' - description: >- - List of vector store file objects in the batch - first_id: - type: string - description: >- - (Optional) ID of the first file in the list for pagination - last_id: - type: string - description: >- - (Optional) ID of the last file in the list for pagination - has_more: - type: boolean - default: false - description: >- - Whether there are more files available beyond this page - additionalProperties: false - required: - - object - - data - - has_more - title: VectorStoreFilesListInBatchResponse - description: >- - Response from listing files in a vector store file batch. - VectorStoreListResponse: - type: object - properties: - object: - type: string - default: list - description: Object type identifier, always "list" - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreObject' - description: List of vector store objects - first_id: - type: string - description: >- - (Optional) ID of the first vector store in the list for pagination - last_id: - type: string - description: >- - (Optional) ID of the last vector store in the list for pagination - has_more: - type: boolean - default: false - description: >- - Whether there are more vector stores available beyond this page - additionalProperties: false - required: - - object - - data - - has_more - title: VectorStoreListResponse - description: Response from listing vector stores. - Response: - type: object - title: Response - VectorStoreContent: - type: object - properties: - type: - type: string - const: text - description: >- - Content type, currently only "text" is supported - text: - type: string - description: The actual text content - additionalProperties: false - required: - - type - - text - title: VectorStoreContent - description: >- - Content item from a vector store file or search result. - VectorStoreFileContentsResponse: - type: object - properties: - file_id: - type: string - description: Unique identifier for the file - filename: - type: string - description: Name of the file - attributes: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - Key-value attributes associated with the file - content: - type: array - items: - $ref: '#/components/schemas/VectorStoreContent' - description: List of content items from the file - additionalProperties: false - required: - - file_id - - filename - - attributes - - content - title: VectorStoreFileContentsResponse - description: >- - Response from retrieving the contents of a vector store file. - OpenaiSearchVectorStoreRequest: - type: object - properties: - query: - oneOf: - - type: string - - type: array - items: - type: string - description: >- - The query string or array for performing the search. - filters: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - Filters based on file attributes to narrow the search results. - max_num_results: - type: integer - description: >- - Maximum number of results to return (1 to 50 inclusive, default 10). 
- ranking_options: - type: object - properties: - ranker: - type: string - description: >- - (Optional) Name of the ranking algorithm to use - score_threshold: - type: number - default: 0.0 - description: >- - (Optional) Minimum relevance score threshold for results - additionalProperties: false - description: >- - Ranking options for fine-tuning the search results. - rewrite_query: - type: boolean - description: >- - Whether to rewrite the natural language query for vector search (default - false) - search_mode: - type: string - description: >- - The search mode to use - "keyword", "vector", or "hybrid" (default "vector") - additionalProperties: false - required: - - query - title: OpenaiSearchVectorStoreRequest - VectorStoreSearchResponse: - type: object - properties: - file_id: - type: string - description: >- - Unique identifier of the file containing the result - filename: - type: string - description: Name of the file containing the result - score: - type: number - description: Relevance score for this search result - attributes: - type: object - additionalProperties: - oneOf: - - type: string - - type: number - - type: boolean - description: >- - (Optional) Key-value attributes associated with the file - content: - type: array - items: - $ref: '#/components/schemas/VectorStoreContent' - description: >- - List of content items matching the search query - additionalProperties: false - required: - - file_id - - filename - - score - - content - title: VectorStoreSearchResponse - description: Response from searching a vector store. - VectorStoreSearchResponsePage: - type: object - properties: - object: - type: string - default: vector_store.search_results.page - description: >- - Object type identifier for the search results page - search_query: - type: string - description: >- - The original search query that was executed - data: - type: array - items: - $ref: '#/components/schemas/VectorStoreSearchResponse' - description: List of search result objects - has_more: - type: boolean - default: false - description: >- - Whether there are more results available beyond this page - next_page: - type: string - description: >- - (Optional) Token for retrieving the next page of results - additionalProperties: false - required: - - object - - search_query - - data - - has_more - title: VectorStoreSearchResponsePage - description: >- - Paginated response from searching a vector store. - OpenaiUpdateVectorStoreRequest: - type: object - properties: - name: - type: string - description: The name of the vector store. - expires_after: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The expiration policy for a vector store. - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - Set of 16 key-value pairs that can be attached to an object. - additionalProperties: false - title: OpenaiUpdateVectorStoreRequest - OpenaiUpdateVectorStoreFileRequest: - type: object - properties: - attributes: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - The updated key-value attributes to store with the file. 
- additionalProperties: false - required: - - attributes - title: OpenaiUpdateVectorStoreFileRequest - ExpiresAfter: - type: object - properties: - anchor: - type: string - const: created_at - seconds: - type: integer - additionalProperties: false - required: - - anchor - - seconds - title: ExpiresAfter - description: >- - Control expiration of uploaded files. - - Params: - - anchor, must be "created_at" - - seconds, must be int between 3600 and 2592000 (1 hour to 30 days) - DPOAlignmentConfig: - type: object - properties: - beta: - type: number - description: Temperature parameter for the DPO loss - loss_type: - $ref: '#/components/schemas/DPOLossType' - default: sigmoid - description: The type of loss function to use for DPO - additionalProperties: false - required: - - beta - - loss_type - title: DPOAlignmentConfig - description: >- - Configuration for Direct Preference Optimization (DPO) alignment. - DPOLossType: - type: string - enum: - - sigmoid - - hinge - - ipo - - kto_pair - title: DPOLossType - DataConfig: - type: object - properties: - dataset_id: - type: string - description: >- - Unique identifier for the training dataset - batch_size: - type: integer - description: Number of samples per training batch - shuffle: - type: boolean - description: >- - Whether to shuffle the dataset during training - data_format: - $ref: '#/components/schemas/DatasetFormat' - description: >- - Format of the dataset (instruct or dialog) - validation_dataset_id: - type: string - description: >- - (Optional) Unique identifier for the validation dataset - packed: - type: boolean - default: false - description: >- - (Optional) Whether to pack multiple samples into a single sequence for - efficiency - train_on_input: - type: boolean - default: false - description: >- - (Optional) Whether to compute loss on input tokens as well as output tokens - additionalProperties: false - required: - - dataset_id - - batch_size - - shuffle - - data_format - title: DataConfig - description: >- - Configuration for training data and data loading. - DatasetFormat: - type: string - enum: - - instruct - - dialog - title: DatasetFormat - description: Format of the training dataset. - EfficiencyConfig: - type: object - properties: - enable_activation_checkpointing: - type: boolean - default: false - description: >- - (Optional) Whether to use activation checkpointing to reduce memory usage - enable_activation_offloading: - type: boolean - default: false - description: >- - (Optional) Whether to offload activations to CPU to save GPU memory - memory_efficient_fsdp_wrap: - type: boolean - default: false - description: >- - (Optional) Whether to use memory-efficient FSDP wrapping - fsdp_cpu_offload: - type: boolean - default: false - description: >- - (Optional) Whether to offload FSDP parameters to CPU - additionalProperties: false - title: EfficiencyConfig - description: >- - Configuration for memory and compute efficiency optimizations. 
- OptimizerConfig: - type: object - properties: - optimizer_type: - $ref: '#/components/schemas/OptimizerType' - description: >- - Type of optimizer to use (adam, adamw, or sgd) - lr: - type: number - description: Learning rate for the optimizer - weight_decay: - type: number - description: >- - Weight decay coefficient for regularization - num_warmup_steps: - type: integer - description: Number of steps for learning rate warmup - additionalProperties: false - required: - - optimizer_type - - lr - - weight_decay - - num_warmup_steps - title: OptimizerConfig - description: >- - Configuration parameters for the optimization algorithm. - OptimizerType: - type: string - enum: - - adam - - adamw - - sgd - title: OptimizerType - description: >- - Available optimizer algorithms for training. - TrainingConfig: - type: object - properties: - n_epochs: - type: integer - description: Number of training epochs to run - max_steps_per_epoch: - type: integer - default: 1 - description: Maximum number of steps to run per epoch - gradient_accumulation_steps: - type: integer - default: 1 - description: >- - Number of steps to accumulate gradients before updating - max_validation_steps: - type: integer - default: 1 - description: >- - (Optional) Maximum number of validation steps per epoch - data_config: - $ref: '#/components/schemas/DataConfig' - description: >- - (Optional) Configuration for data loading and formatting - optimizer_config: - $ref: '#/components/schemas/OptimizerConfig' - description: >- - (Optional) Configuration for the optimization algorithm - efficiency_config: - $ref: '#/components/schemas/EfficiencyConfig' - description: >- - (Optional) Configuration for memory and compute optimizations - dtype: - type: string - default: bf16 - description: >- - (Optional) Data type for model parameters (bf16, fp16, fp32) - additionalProperties: false - required: - - n_epochs - - max_steps_per_epoch - - gradient_accumulation_steps - title: TrainingConfig - description: >- - Comprehensive configuration for the training process. - PreferenceOptimizeRequest: - type: object - properties: - job_uuid: - type: string - description: The UUID of the job to create. - finetuned_model: - type: string - description: The model to fine-tune. - algorithm_config: - $ref: '#/components/schemas/DPOAlignmentConfig' - description: The algorithm configuration. - training_config: - $ref: '#/components/schemas/TrainingConfig' - description: The training configuration. - hyperparam_search_config: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The hyperparam search configuration. - logger_config: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The logger configuration. - additionalProperties: false - required: - - job_uuid - - finetuned_model - - algorithm_config - - training_config - - hyperparam_search_config - - logger_config - title: PreferenceOptimizeRequest - PostTrainingJob: - type: object - properties: - job_uuid: - type: string - additionalProperties: false - required: - - job_uuid - title: PostTrainingJob + - documents + - vector_db_id + - chunk_size_in_tokens + title: InsertRequest DefaultRAGQueryGeneratorConfig: type: object properties: @@ -12920,6 +7909,382 @@ components: title: RAGQueryResult description: >- Result of a RAG query containing retrieved content and metadata. 
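To make the newly added RAG ingestion schemas concrete, here is a minimal sketch of an `insert` request body as described by the `RAGDocument` and `InsertRequest` schemas introduced in this diff. All identifiers and values are placeholders for illustration, not values from the spec.

```yaml
# Hypothetical InsertRequest payload for the RAG tool runtime.
# Field names follow the InsertRequest / RAGDocument schemas; values are illustrative only.
documents:
  - document_id: doc-001                 # required
    content: "Llama Stack organizes its APIs by stability level."   # string form of content
    mime_type: text/plain                # optional
    metadata:                            # required (may carry arbitrary key-value pairs)
      source: docs
vector_db_id: my-vector-db               # required
chunk_size_in_tokens: 512                # listed as required by the schema
```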
+ ToolGroup: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: tool_group + default: tool_group + description: Type of resource, always 'tool_group' + mcp_endpoint: + $ref: '#/components/schemas/URL' + description: >- + (Optional) Model Context Protocol endpoint for remote tools + args: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional arguments for the tool group + additionalProperties: false + required: + - identifier + - provider_id + - type + title: ToolGroup + description: >- + A group of related tools managed together. + ListToolGroupsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/ToolGroup' + description: List of tool groups + additionalProperties: false + required: + - data + title: ListToolGroupsResponse + description: >- + Response containing a list of tool groups. + RegisterToolGroupRequest: + type: object + properties: + toolgroup_id: + type: string + description: The ID of the tool group to register. + provider_id: + type: string + description: >- + The ID of the provider to use for the tool group. + mcp_endpoint: + $ref: '#/components/schemas/URL' + description: >- + The MCP endpoint to use for the tool group. + args: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + A dictionary of arguments to pass to the tool group. + additionalProperties: false + required: + - toolgroup_id + - provider_id + title: RegisterToolGroupRequest + Tool: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: tool + default: tool + description: Type of resource, always 'tool' + toolgroup_id: + type: string + description: >- + ID of the tool group this tool belongs to + description: + type: string + description: >- + Human-readable description of what the tool does + parameters: + type: array + items: + $ref: '#/components/schemas/ToolParameter' + description: List of parameters this tool accepts + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Additional metadata about the tool + additionalProperties: false + required: + - identifier + - provider_id + - type + - toolgroup_id + - description + - parameters + title: Tool + description: A tool that can be invoked by agents. + ListToolsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/Tool' + description: List of tools + additionalProperties: false + required: + - data + title: ListToolsResponse + description: Response containing a list of tools. 
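As a rough illustration of the tool group schemas above, a `RegisterToolGroupRequest` body might look like the sketch below. The endpoint and argument values are made up, and the shape of the referenced `URL` schema (an object with a single `uri` field) is an assumption, since that schema is not shown in this diff.

```yaml
# Hypothetical RegisterToolGroupRequest payload (illustrative values only).
toolgroup_id: mcp::filesystem          # required
provider_id: model-context-protocol    # required
mcp_endpoint:                          # optional; assumes URL is { uri: <string> }
  uri: http://localhost:8000/sse
args:                                  # optional free-form arguments passed to the tool group
  read_only: true
```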
+ VectorDB: + type: object + properties: + identifier: + type: string + provider_resource_id: + type: string + provider_id: + type: string + type: + type: string + enum: + - model + - shield + - vector_db + - dataset + - scoring_function + - benchmark + - tool + - tool_group + - prompt + const: vector_db + default: vector_db + description: >- + Type of resource, always 'vector_db' for vector databases + embedding_model: + type: string + description: >- + Name of the embedding model to use for vector generation + embedding_dimension: + type: integer + description: Dimension of the embedding vectors + vector_db_name: + type: string + additionalProperties: false + required: + - identifier + - provider_id + - type + - embedding_model + - embedding_dimension + title: VectorDB + description: >- + Vector database resource for storing and querying vector embeddings. + ListVectorDBsResponse: + type: object + properties: + data: + type: array + items: + $ref: '#/components/schemas/VectorDB' + description: List of vector databases + additionalProperties: false + required: + - data + title: ListVectorDBsResponse + description: Response from listing vector databases. + RegisterVectorDbRequest: + type: object + properties: + vector_db_id: + type: string + description: >- + The identifier of the vector database to register. + embedding_model: + type: string + description: The embedding model to use. + embedding_dimension: + type: integer + description: The dimension of the embedding model. + provider_id: + type: string + description: The identifier of the provider. + vector_db_name: + type: string + description: The name of the vector database. + provider_vector_db_id: + type: string + description: >- + The identifier of the vector database in the provider. + additionalProperties: false + required: + - vector_db_id + - embedding_model + title: RegisterVectorDbRequest + Chunk: + type: object + properties: + content: + $ref: '#/components/schemas/InterleavedContent' + description: >- + The content of the chunk, which can be interleaved text, images, or other + types. + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Metadata associated with the chunk that will be used in the model context + during inference. + embedding: + type: array + items: + type: number + description: >- + Optional embedding for the chunk. If not provided, it will be computed + later. + stored_chunk_id: + type: string + description: >- + The chunk ID that is stored in the vector database. Used for backend functionality. + chunk_metadata: + $ref: '#/components/schemas/ChunkMetadata' + description: >- + Metadata for the chunk that will NOT be used in the context during inference. + The `chunk_metadata` is required backend functionality. + additionalProperties: false + required: + - content + - metadata + title: Chunk + description: >- + A chunk of content that can be inserted into a vector database. + ChunkMetadata: + type: object + properties: + chunk_id: + type: string + description: >- + The ID of the chunk. If not set, it will be generated based on the document + ID and content. + document_id: + type: string + description: >- + The ID of the document this chunk belongs to. + source: + type: string + description: >- + The source of the content, such as a URL, file path, or other identifier. 
+ created_timestamp: + type: integer + description: >- + An optional timestamp indicating when the chunk was created. + updated_timestamp: + type: integer + description: >- + An optional timestamp indicating when the chunk was last updated. + chunk_window: + type: string + description: >- + The window of the chunk, which can be used to group related chunks together. + chunk_tokenizer: + type: string + description: >- + The tokenizer used to create the chunk. Default is Tiktoken. + chunk_embedding_model: + type: string + description: >- + The embedding model used to create the chunk's embedding. + chunk_embedding_dimension: + type: integer + description: >- + The dimension of the embedding vector for the chunk. + content_token_count: + type: integer + description: >- + The number of tokens in the content of the chunk. + metadata_token_count: + type: integer + description: >- + The number of tokens in the metadata of the chunk. + additionalProperties: false + title: ChunkMetadata + description: >- + `ChunkMetadata` is backend metadata for a `Chunk` that is used to store additional + information about the chunk that will not be used in the context during + inference, but is required for backend functionality. The `ChunkMetadata` is + set during chunk creation in `MemoryToolRuntimeImpl().insert()`and is not + expected to change after. Use `Chunk.metadata` for metadata that will + be used in the context during inference. + InsertChunksRequest: + type: object + properties: + vector_db_id: + type: string + description: >- + The identifier of the vector database to insert the chunks into. + chunks: + type: array + items: + $ref: '#/components/schemas/Chunk' + description: >- + The chunks to insert. Each `Chunk` should contain content which can be + interleaved text, images, or other types. `metadata`: `dict[str, Any]` + and `embedding`: `List[float]` are optional. If `metadata` is provided, + you configure how Llama Stack formats the chunk during generation. If + `embedding` is not provided, it will be computed later. + ttl_seconds: + type: integer + description: The time to live of the chunks. + additionalProperties: false + required: + - vector_db_id + - chunks + title: InsertChunksRequest QueryChunksRequest: type: object properties: @@ -12968,268 +8333,158 @@ components: title: QueryChunksResponse description: >- Response from querying chunks in a vector database. - QueryMetricsRequest: + VectorStoreFileCounts: type: object properties: - start_time: + completed: type: integer - description: The start time of the metric to query. - end_time: + description: >- + Number of files that have been successfully processed + cancelled: type: integer - description: The end time of the metric to query. - granularity: + description: >- + Number of files that had their processing cancelled + failed: + type: integer + description: Number of files that failed to process + in_progress: + type: integer + description: >- + Number of files currently being processed + total: + type: integer + description: >- + Total number of files in the vector store + additionalProperties: false + required: + - completed + - cancelled + - failed + - in_progress + - total + title: VectorStoreFileCounts + description: >- + File processing status counts for a vector store. + VectorStoreListResponse: + type: object + properties: + object: type: string - description: The granularity of the metric to query. - query_type: - type: string - enum: - - range - - instant - description: The type of query to perform. 
- label_matchers: + default: list + description: Object type identifier, always "list" + data: type: array items: - type: object - properties: - name: - type: string - description: The name of the label to match - value: - type: string - description: The value to match against - operator: - type: string - enum: - - '=' - - '!=' - - =~ - - '!~' - description: >- - The comparison operator to use for matching - default: '=' - additionalProperties: false - required: - - name - - value - - operator - title: MetricLabelMatcher - description: >- - A matcher for filtering metrics by label values. + $ref: '#/components/schemas/VectorStoreObject' + description: List of vector store objects + first_id: + type: string description: >- - The label matchers to apply to the metric. + (Optional) ID of the first vector store in the list for pagination + last_id: + type: string + description: >- + (Optional) ID of the last vector store in the list for pagination + has_more: + type: boolean + default: false + description: >- + Whether there are more vector stores available beyond this page additionalProperties: false required: - - start_time - - query_type - title: QueryMetricsRequest - MetricDataPoint: + - object + - data + - has_more + title: VectorStoreListResponse + description: Response from listing vector stores. + VectorStoreObject: type: object properties: - timestamp: + id: + type: string + description: Unique identifier for the vector store + object: + type: string + default: vector_store + description: >- + Object type identifier, always "vector_store" + created_at: type: integer description: >- - Unix timestamp when the metric value was recorded - value: - type: number - description: >- - The numeric value of the metric at this timestamp - unit: + Timestamp when the vector store was created + name: type: string + description: (Optional) Name of the vector store + usage_bytes: + type: integer + default: 0 + description: >- + Storage space used by the vector store in bytes + file_counts: + $ref: '#/components/schemas/VectorStoreFileCounts' + description: >- + File processing status counts for the vector store + status: + type: string + default: completed + description: Current status of the vector store + expires_after: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + (Optional) Expiration policy for the vector store + expires_at: + type: integer + description: >- + (Optional) Timestamp when the vector store will expire + last_active_at: + type: integer + description: >- + (Optional) Timestamp of last activity on the vector store + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Set of key-value pairs that can be attached to the vector store additionalProperties: false required: - - timestamp - - value - - unit - title: MetricDataPoint - description: >- - A single data point in a metric time series. - MetricLabel: + - id + - object + - created_at + - usage_bytes + - file_counts + - status + - metadata + title: VectorStoreObject + description: OpenAI Vector Store object. 
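For orientation, a `VectorStoreObject` response conforming to the schema above might look like the following sketch; the IDs, timestamp, and counts are placeholders.

```yaml
# Illustrative VectorStoreObject instance (all values are placeholders).
id: vs_123
object: vector_store
created_at: 1727712000        # Unix timestamp in seconds
name: product-docs            # optional
usage_bytes: 4096
file_counts:                  # VectorStoreFileCounts
  completed: 2
  cancelled: 0
  failed: 0
  in_progress: 1
  total: 3
status: in_progress
metadata:
  team: docs
```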
+ OpenaiCreateVectorStoreRequest: type: object properties: name: type: string - description: The name of the label - value: - type: string - description: The value of the label - additionalProperties: false - required: - - name - - value - title: MetricLabel - description: A label associated with a metric. - MetricSeries: - type: object - properties: - metric: - type: string - description: The name of the metric - labels: - type: array - items: - $ref: '#/components/schemas/MetricLabel' - description: >- - List of labels associated with this metric series - values: - type: array - items: - $ref: '#/components/schemas/MetricDataPoint' - description: >- - List of data points in chronological order - additionalProperties: false - required: - - metric - - labels - - values - title: MetricSeries - description: A time series of metric data points. - QueryMetricsResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/MetricSeries' - description: >- - List of metric series matching the query criteria - additionalProperties: false - required: - - data - title: QueryMetricsResponse - description: >- - Response containing metric time series data. - QueryCondition: - type: object - properties: - key: - type: string - description: The attribute key to filter on - op: - $ref: '#/components/schemas/QueryConditionOp' - description: The comparison operator to apply - value: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The value to compare against - additionalProperties: false - required: - - key - - op - - value - title: QueryCondition - description: A condition for filtering query results. - QueryConditionOp: - type: string - enum: - - eq - - ne - - gt - - lt - title: QueryConditionOp - description: >- - Comparison operators for query conditions. - QuerySpansRequest: - type: object - properties: - attribute_filters: - type: array - items: - $ref: '#/components/schemas/QueryCondition' - description: >- - The attribute filters to apply to the spans. - attributes_to_return: - type: array - items: - type: string - description: The attributes to return in the spans. - max_depth: - type: integer - description: The maximum depth of the tree. - additionalProperties: false - required: - - attribute_filters - - attributes_to_return - title: QuerySpansRequest - QuerySpansResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Span' - description: >- - List of spans matching the query criteria - additionalProperties: false - required: - - data - title: QuerySpansResponse - description: Response containing a list of spans. - QueryTracesRequest: - type: object - properties: - attribute_filters: - type: array - items: - $ref: '#/components/schemas/QueryCondition' - description: >- - The attribute filters to apply to the traces. - limit: - type: integer - description: The limit of traces to return. - offset: - type: integer - description: The offset of the traces to return. - order_by: - type: array - items: - type: string - description: The order by of the traces to return. - additionalProperties: false - title: QueryTracesRequest - QueryTracesResponse: - type: object - properties: - data: - type: array - items: - $ref: '#/components/schemas/Trace' - description: >- - List of traces matching the query criteria - additionalProperties: false - required: - - data - title: QueryTracesResponse - description: Response containing a list of traces. 
- RegisterBenchmarkRequest: - type: object - properties: - benchmark_id: - type: string - description: The ID of the benchmark to register. - dataset_id: - type: string - description: >- - The ID of the dataset to use for the benchmark. - scoring_functions: + description: A name for the vector store. + file_ids: type: array items: type: string description: >- - The scoring functions to use for the benchmark. - provider_benchmark_id: - type: string - description: >- - The ID of the provider benchmark to use for the benchmark. - provider_id: - type: string - description: >- - The ID of the provider to use for the benchmark. - metadata: + A list of File IDs that the vector store should use. Useful for tools + like `file_search` that can access files. + expires_after: type: object additionalProperties: oneOf: @@ -13239,54 +8494,21 @@ components: - type: string - type: array - type: object - description: The metadata to use for the benchmark. - additionalProperties: false - required: - - benchmark_id - - dataset_id - - scoring_functions - title: RegisterBenchmarkRequest - DataSource: - oneOf: - - $ref: '#/components/schemas/URIDataSource' - - $ref: '#/components/schemas/RowsDataSource' - discriminator: - propertyName: type - mapping: - uri: '#/components/schemas/URIDataSource' - rows: '#/components/schemas/RowsDataSource' - RegisterDatasetRequest: - type: object - properties: - purpose: - type: string - enum: - - post-training/messages - - eval/question-answer - - eval/messages-answer description: >- - The purpose of the dataset. One of: - "post-training/messages": The dataset - contains a messages column with list of messages for post-training. { - "messages": [ {"role": "user", "content": "Hello, world!"}, {"role": "assistant", - "content": "Hello, world!"}, ] } - "eval/question-answer": The dataset - contains a question column and an answer column for evaluation. { "question": - "What is the capital of France?", "answer": "Paris" } - "eval/messages-answer": - The dataset contains a messages column with list of messages and an answer - column for evaluation. { "messages": [ {"role": "user", "content": "Hello, - my name is John Doe."}, {"role": "assistant", "content": "Hello, John - Doe. How can I help you today?"}, {"role": "user", "content": "What's - my name?"}, ], "answer": "John Doe" } - source: - $ref: '#/components/schemas/DataSource' + The expiration policy for a vector store. + chunking_strategy: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object description: >- - The data source of the dataset. Ensure that the data source schema is - compatible with the purpose of the dataset. Examples: - { "type": "uri", - "uri": "https://mywebsite.com/mydata.jsonl" } - { "type": "uri", "uri": - "lsfs://mydata.jsonl" } - { "type": "uri", "uri": "data:csv;base64,{base64_content}" - } - { "type": "uri", "uri": "huggingface://llamastack/simpleqa?split=train" - } - { "type": "rows", "rows": [ { "messages": [ {"role": "user", "content": - "Hello, world!"}, {"role": "assistant", "content": "Hello, world!"}, ] - } ] } + The chunking strategy used to chunk the file(s). If not set, will use + the `auto` strategy. metadata: type: object additionalProperties: @@ -13298,774 +8520,621 @@ components: - type: array - type: object description: >- - The metadata for the dataset. - E.g. {"description": "My dataset"}. - dataset_id: - type: string - description: >- - The ID of the dataset. 
If not provided, an ID will be generated. - additionalProperties: false - required: - - purpose - - source - title: RegisterDatasetRequest - RegisterModelRequest: - type: object - properties: - model_id: - type: string - description: The identifier of the model to register. - provider_model_id: - type: string - description: >- - The identifier of the model in the provider. - provider_id: - type: string - description: The identifier of the provider. - metadata: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: Any additional metadata for this model. - model_type: - $ref: '#/components/schemas/ModelType' - description: The type of model to register. - additionalProperties: false - required: - - model_id - title: RegisterModelRequest - ParamType: - oneOf: - - $ref: '#/components/schemas/StringType' - - $ref: '#/components/schemas/NumberType' - - $ref: '#/components/schemas/BooleanType' - - $ref: '#/components/schemas/ArrayType' - - $ref: '#/components/schemas/ObjectType' - - $ref: '#/components/schemas/JsonType' - - $ref: '#/components/schemas/UnionType' - - $ref: '#/components/schemas/ChatCompletionInputType' - - $ref: '#/components/schemas/CompletionInputType' - - $ref: '#/components/schemas/AgentTurnInputType' - discriminator: - propertyName: type - mapping: - string: '#/components/schemas/StringType' - number: '#/components/schemas/NumberType' - boolean: '#/components/schemas/BooleanType' - array: '#/components/schemas/ArrayType' - object: '#/components/schemas/ObjectType' - json: '#/components/schemas/JsonType' - union: '#/components/schemas/UnionType' - chat_completion_input: '#/components/schemas/ChatCompletionInputType' - completion_input: '#/components/schemas/CompletionInputType' - agent_turn_input: '#/components/schemas/AgentTurnInputType' - RegisterScoringFunctionRequest: - type: object - properties: - scoring_fn_id: - type: string - description: >- - The ID of the scoring function to register. - description: - type: string - description: The description of the scoring function. - return_type: - $ref: '#/components/schemas/ParamType' - description: The return type of the scoring function. - provider_scoring_fn_id: - type: string - description: >- - The ID of the provider scoring function to use for the scoring function. - provider_id: - type: string - description: >- - The ID of the provider to use for the scoring function. - params: - $ref: '#/components/schemas/ScoringFnParams' - description: >- - The parameters for the scoring function for benchmark eval, these can - be overridden for app eval. - additionalProperties: false - required: - - scoring_fn_id - - description - - return_type - title: RegisterScoringFunctionRequest - RegisterShieldRequest: - type: object - properties: - shield_id: - type: string - description: >- - The identifier of the shield to register. - provider_shield_id: - type: string - description: >- - The identifier of the shield in the provider. - provider_id: - type: string - description: The identifier of the provider. - params: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The parameters of the shield. 
- additionalProperties: false - required: - - shield_id - title: RegisterShieldRequest - RegisterToolGroupRequest: - type: object - properties: - toolgroup_id: - type: string - description: The ID of the tool group to register. - provider_id: - type: string - description: >- - The ID of the provider to use for the tool group. - mcp_endpoint: - $ref: '#/components/schemas/URL' - description: >- - The MCP endpoint to use for the tool group. - args: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - A dictionary of arguments to pass to the tool group. - additionalProperties: false - required: - - toolgroup_id - - provider_id - title: RegisterToolGroupRequest - RegisterVectorDbRequest: - type: object - properties: - vector_db_id: - type: string - description: >- - The identifier of the vector database to register. + Set of 16 key-value pairs that can be attached to an object. embedding_model: type: string - description: The embedding model to use. + description: >- + The embedding model to use for this vector store. embedding_dimension: type: integer - description: The dimension of the embedding model. + description: >- + The dimension of the embedding vectors (default: 384). provider_id: - type: string - description: The identifier of the provider. - vector_db_name: - type: string - description: The name of the vector database. - provider_vector_db_id: type: string description: >- - The identifier of the vector database in the provider. + The ID of the provider to use for this vector store. additionalProperties: false - required: - - vector_db_id - - embedding_model - title: RegisterVectorDbRequest - RerankRequest: + title: OpenaiCreateVectorStoreRequest + OpenaiUpdateVectorStoreRequest: type: object properties: - model: + name: + type: string + description: The name of the vector store. + expires_after: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The expiration policy for a vector store. + metadata: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Set of 16 key-value pairs that can be attached to an object. + additionalProperties: false + title: OpenaiUpdateVectorStoreRequest + VectorStoreDeleteResponse: + type: object + properties: + id: type: string description: >- - The identifier of the reranking model to use. - query: - oneOf: - - type: string - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + Unique identifier of the deleted vector store + object: + type: string + default: vector_store.deleted description: >- - The search query to rank items against. Can be a string, text content - part, or image content part. The input must not exceed the model's max - input token length. - items: + Object type identifier for the deletion response + deleted: + type: boolean + default: true + description: >- + Whether the deletion operation was successful + additionalProperties: false + required: + - id + - object + - deleted + title: VectorStoreDeleteResponse + description: Response from deleting a vector store. 
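A minimal `OpenaiCreateVectorStoreRequest` body, sketched from the schema above. The file IDs and embedding model name are assumptions for illustration; any embedding model registered with the stack could be used.

```yaml
# Hypothetical request body for creating a vector store.
name: product-docs
file_ids:
  - file_abc123
  - file_def456
embedding_model: all-MiniLM-L6-v2   # assumed model name
embedding_dimension: 384            # matches the documented default
metadata:
  team: docs
```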
+ VectorStoreChunkingStrategy: + oneOf: + - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' + - $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic' + discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/VectorStoreChunkingStrategyAuto' + static: '#/components/schemas/VectorStoreChunkingStrategyStatic' + VectorStoreChunkingStrategyAuto: + type: object + properties: + type: + type: string + const: auto + default: auto + description: >- + Strategy type, always "auto" for automatic chunking + additionalProperties: false + required: + - type + title: VectorStoreChunkingStrategyAuto + description: >- + Automatic chunking strategy for vector store files. + VectorStoreChunkingStrategyStatic: + type: object + properties: + type: + type: string + const: static + default: static + description: >- + Strategy type, always "static" for static chunking + static: + $ref: '#/components/schemas/VectorStoreChunkingStrategyStaticConfig' + description: >- + Configuration parameters for the static chunking strategy + additionalProperties: false + required: + - type + - static + title: VectorStoreChunkingStrategyStatic + description: >- + Static chunking strategy with configurable parameters. + VectorStoreChunkingStrategyStaticConfig: + type: object + properties: + chunk_overlap_tokens: + type: integer + default: 400 + description: >- + Number of tokens to overlap between adjacent chunks + max_chunk_size_tokens: + type: integer + default: 800 + description: >- + Maximum number of tokens per chunk, must be between 100 and 4096 + additionalProperties: false + required: + - chunk_overlap_tokens + - max_chunk_size_tokens + title: VectorStoreChunkingStrategyStaticConfig + description: >- + Configuration for static chunking strategy. + OpenaiCreateVectorStoreFileBatchRequest: + type: object + properties: + file_ids: type: array items: + type: string + description: >- + A list of File IDs that the vector store should use. + attributes: + type: object + additionalProperties: oneOf: + - type: 'null' + - type: boolean + - type: number - type: string - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartTextParam' - - $ref: '#/components/schemas/OpenAIChatCompletionContentPartImageParam' + - type: array + - type: object description: >- - List of items to rerank. Each item can be a string, text content part, - or image content part. Each input must not exceed the model's max input - token length. - max_num_results: - type: integer + (Optional) Key-value attributes to store with the files. + chunking_strategy: + $ref: '#/components/schemas/VectorStoreChunkingStrategy' description: >- - (Optional) Maximum number of results to return. Default: returns all. + (Optional) The chunking strategy used to chunk the file(s). Defaults to + auto. additionalProperties: false required: - - model - - query - - items - title: RerankRequest - RerankData: + - file_ids + title: OpenaiCreateVectorStoreFileBatchRequest + VectorStoreFileBatchObject: type: object properties: - index: + id: + type: string + description: Unique identifier for the file batch + object: + type: string + default: vector_store.file_batch + description: >- + Object type identifier, always "vector_store.file_batch" + created_at: type: integer description: >- - The original index of the document in the input list - relevance_score: - type: number + Timestamp when the file batch was created + vector_store_id: + type: string description: >- - The relevance score from the model output. 
Values are inverted when applicable - so that higher scores indicate greater relevance. + ID of the vector store containing the file batch + status: + $ref: '#/components/schemas/VectorStoreFileStatus' + description: >- + Current processing status of the file batch + file_counts: + $ref: '#/components/schemas/VectorStoreFileCounts' + description: >- + File processing status counts for the batch additionalProperties: false required: - - index - - relevance_score - title: RerankData + - id + - object + - created_at + - vector_store_id + - status + - file_counts + title: VectorStoreFileBatchObject + description: OpenAI Vector Store File Batch object. + VectorStoreFileStatus: + oneOf: + - type: string + const: completed + - type: string + const: in_progress + - type: string + const: cancelled + - type: string + const: failed + VectorStoreFileLastError: + type: object + properties: + code: + oneOf: + - type: string + const: server_error + - type: string + const: rate_limit_exceeded + description: >- + Error code indicating the type of failure + message: + type: string + description: >- + Human-readable error message describing the failure + additionalProperties: false + required: + - code + - message + title: VectorStoreFileLastError description: >- - A single rerank result from a reranking response. - RerankResponse: + Error information for failed vector store file processing. + VectorStoreFileObject: type: object properties: + id: + type: string + description: Unique identifier for the file + object: + type: string + default: vector_store.file + description: >- + Object type identifier, always "vector_store.file" + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Key-value attributes associated with the file + chunking_strategy: + oneOf: + - $ref: '#/components/schemas/VectorStoreChunkingStrategyAuto' + - $ref: '#/components/schemas/VectorStoreChunkingStrategyStatic' + discriminator: + propertyName: type + mapping: + auto: '#/components/schemas/VectorStoreChunkingStrategyAuto' + static: '#/components/schemas/VectorStoreChunkingStrategyStatic' + description: >- + Strategy used for splitting the file into chunks + created_at: + type: integer + description: >- + Timestamp when the file was added to the vector store + last_error: + $ref: '#/components/schemas/VectorStoreFileLastError' + description: >- + (Optional) Error information if file processing failed + status: + $ref: '#/components/schemas/VectorStoreFileStatus' + description: Current processing status of the file + usage_bytes: + type: integer + default: 0 + description: Storage space used by this file in bytes + vector_store_id: + type: string + description: >- + ID of the vector store containing this file + additionalProperties: false + required: + - id + - object + - attributes + - chunking_strategy + - created_at + - status + - usage_bytes + - vector_store_id + title: VectorStoreFileObject + description: OpenAI Vector Store File object. 
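`VectorStoreFileStatus` and `VectorStoreFileLastError` above define the full lifecycle a client has to handle once files are attached. A minimal sketch of that handling, using only the literal values from those schemas; the `VectorStoreFileObject`-shaped dict is illustrative input, not the output of a real API call.

```python
# Hedged sketch: interpret the status and last_error fields defined above.
# Only literal values from VectorStoreFileStatus / VectorStoreFileLastError
# are used; the example dict is illustrative.
def is_terminal(status: str) -> bool:
    # "in_progress" is the only non-terminal value in VectorStoreFileStatus
    return status in {"completed", "cancelled", "failed"}

def summarize_file(file_obj: dict) -> str:
    status = file_obj["status"]
    if not is_terminal(status):
        return "still processing"
    err = file_obj.get("last_error")
    if err is not None:
        return f"{status}: {err['code']} ({err['message']})"
    return f"{status}, {file_obj.get('usage_bytes', 0)} bytes stored"

example = {
    "id": "file-123",
    "object": "vector_store.file",
    "status": "failed",
    "usage_bytes": 0,
    "last_error": {"code": "rate_limit_exceeded", "message": "Too many requests"},
}
print(summarize_file(example))  # failed: rate_limit_exceeded (Too many requests)
```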
+ VectorStoreFilesListInBatchResponse: + type: object + properties: + object: + type: string + default: list + description: Object type identifier, always "list" data: type: array items: - $ref: '#/components/schemas/RerankData' + $ref: '#/components/schemas/VectorStoreFileObject' description: >- - List of rerank result objects, sorted by relevance score (descending) + List of vector store file objects in the batch + first_id: + type: string + description: >- + (Optional) ID of the first file in the list for pagination + last_id: + type: string + description: >- + (Optional) ID of the last file in the list for pagination + has_more: + type: boolean + default: false + description: >- + Whether there are more files available beyond this page additionalProperties: false required: + - object - data - title: RerankResponse - description: Response from a reranking request. - ResumeAgentTurnRequest: + - has_more + title: VectorStoreFilesListInBatchResponse + description: >- + Response from listing files in a vector store file batch. + VectorStoreListFilesResponse: type: object properties: - tool_responses: + object: + type: string + default: list + description: Object type identifier, always "list" + data: type: array items: - $ref: '#/components/schemas/ToolResponse' + $ref: '#/components/schemas/VectorStoreFileObject' + description: List of vector store file objects + first_id: + type: string description: >- - The tool call responses to resume the turn with. - stream: + (Optional) ID of the first file in the list for pagination + last_id: + type: string + description: >- + (Optional) ID of the last file in the list for pagination + has_more: type: boolean - description: Whether to stream the response. + default: false + description: >- + Whether there are more files available beyond this page additionalProperties: false required: - - tool_responses - title: ResumeAgentTurnRequest - RunEvalRequest: + - object + - data + - has_more + title: VectorStoreListFilesResponse + description: >- + Response from listing files in a vector store. + OpenaiAttachFileToVectorStoreRequest: type: object properties: - benchmark_config: - $ref: '#/components/schemas/BenchmarkConfig' - description: The configuration for the benchmark. + file_id: + type: string + description: >- + The ID of the file to attach to the vector store. + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The key-value attributes stored with the file, which can be used for filtering. + chunking_strategy: + $ref: '#/components/schemas/VectorStoreChunkingStrategy' + description: >- + The chunking strategy to use for the file. additionalProperties: false required: - - benchmark_config - title: RunEvalRequest - RunModerationRequest: + - file_id + title: OpenaiAttachFileToVectorStoreRequest + OpenaiUpdateVectorStoreFileRequest: type: object properties: - input: + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + The updated key-value attributes to store with the file. 
+ additionalProperties: false + required: + - attributes + title: OpenaiUpdateVectorStoreFileRequest + VectorStoreFileDeleteResponse: + type: object + properties: + id: + type: string + description: Unique identifier of the deleted file + object: + type: string + default: vector_store.file.deleted + description: >- + Object type identifier for the deletion response + deleted: + type: boolean + default: true + description: >- + Whether the deletion operation was successful + additionalProperties: false + required: + - id + - object + - deleted + title: VectorStoreFileDeleteResponse + description: >- + Response from deleting a vector store file. + VectorStoreContent: + type: object + properties: + type: + type: string + const: text + description: >- + Content type, currently only "text" is supported + text: + type: string + description: The actual text content + additionalProperties: false + required: + - type + - text + title: VectorStoreContent + description: >- + Content item from a vector store file or search result. + VectorStoreFileContentsResponse: + type: object + properties: + file_id: + type: string + description: Unique identifier for the file + filename: + type: string + description: Name of the file + attributes: + type: object + additionalProperties: + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object + description: >- + Key-value attributes associated with the file + content: + type: array + items: + $ref: '#/components/schemas/VectorStoreContent' + description: List of content items from the file + additionalProperties: false + required: + - file_id + - filename + - attributes + - content + title: VectorStoreFileContentsResponse + description: >- + Response from retrieving the contents of a vector store file. + OpenaiSearchVectorStoreRequest: + type: object + properties: + query: oneOf: - type: string - type: array items: type: string description: >- - Input (or inputs) to classify. Can be a single string, an array of strings, - or an array of multi-modal input objects similar to other models. - model: - type: string - description: >- - The content moderation model you would like to use. - additionalProperties: false - required: - - input - - model - title: RunModerationRequest - ModerationObject: - type: object - properties: - id: - type: string - description: >- - The unique identifier for the moderation request. - model: - type: string - description: >- - The model used to generate the moderation results. - results: - type: array - items: - $ref: '#/components/schemas/ModerationObjectResults' - description: A list of moderation objects - additionalProperties: false - required: - - id - - model - - results - title: ModerationObject - description: A moderation object. - ModerationObjectResults: - type: object - properties: - flagged: - type: boolean - description: >- - Whether any of the below categories are flagged. - categories: + The query string or array for performing the search. + filters: type: object additionalProperties: - type: boolean + oneOf: + - type: 'null' + - type: boolean + - type: number + - type: string + - type: array + - type: object description: >- - A list of the categories, and whether they are flagged or not. - category_applied_input_types: + Filters based on file attributes to narrow the search results. + max_num_results: + type: integer + description: >- + Maximum number of results to return (1 to 50 inclusive, default 10). 
+ ranking_options: type: object - additionalProperties: - type: array - items: + properties: + ranker: type: string + description: >- + (Optional) Name of the ranking algorithm to use + score_threshold: + type: number + default: 0.0 + description: >- + (Optional) Minimum relevance score threshold for results + additionalProperties: false description: >- - A list of the categories along with the input type(s) that the score applies - to. - category_scores: - type: object - additionalProperties: - type: number + Ranking options for fine-tuning the search results. + rewrite_query: + type: boolean description: >- - A list of the categories along with their scores as predicted by model. - user_message: + Whether to rewrite the natural language query for vector search (default + false) + search_mode: type: string - metadata: + description: >- + The search mode to use - "keyword", "vector", or "hybrid" (default "vector") + additionalProperties: false + required: + - query + title: OpenaiSearchVectorStoreRequest + VectorStoreSearchResponse: + type: object + properties: + file_id: + type: string + description: >- + Unique identifier of the file containing the result + filename: + type: string + description: Name of the file containing the result + score: + type: number + description: Relevance score for this search result + attributes: type: object additionalProperties: oneOf: - - type: 'null' - - type: boolean - - type: number - type: string - - type: array - - type: object - additionalProperties: false - required: - - flagged - - metadata - title: ModerationObjectResults - description: A moderation object. - Message: - oneOf: - - $ref: '#/components/schemas/UserMessage' - - $ref: '#/components/schemas/SystemMessage' - - $ref: '#/components/schemas/ToolResponseMessage' - - $ref: '#/components/schemas/CompletionMessage' - discriminator: - propertyName: role - mapping: - user: '#/components/schemas/UserMessage' - system: '#/components/schemas/SystemMessage' - tool: '#/components/schemas/ToolResponseMessage' - assistant: '#/components/schemas/CompletionMessage' - RunShieldRequest: - type: object - properties: - shield_id: - type: string - description: The identifier of the shield to run. - messages: - type: array - items: - $ref: '#/components/schemas/Message' - description: The messages to run the shield on. - params: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - type: number - - type: string - - type: array - - type: object - description: The parameters of the shield. - additionalProperties: false - required: - - shield_id - - messages - - params - title: RunShieldRequest - RunShieldResponse: - type: object - properties: - violation: - $ref: '#/components/schemas/SafetyViolation' + - type: boolean description: >- - (Optional) Safety violation detected by the shield, if any - additionalProperties: false - title: RunShieldResponse - description: Response from running a safety shield. - SaveSpansToDatasetRequest: - type: object - properties: - attribute_filters: + (Optional) Key-value attributes associated with the file + content: type: array items: - $ref: '#/components/schemas/QueryCondition' + $ref: '#/components/schemas/VectorStoreContent' description: >- - The attribute filters to apply to the spans. - attributes_to_save: + List of content items matching the search query + additionalProperties: false + required: + - file_id + - filename + - score + - content + title: VectorStoreSearchResponse + description: Response from searching a vector store. 
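Taken together, `OpenaiSearchVectorStoreRequest`, `VectorStoreSearchResponse`, and the `VectorStoreSearchResponsePage` wrapper defined just below describe the whole search round trip. A hedged sketch of one request/response cycle, using only field names from those schemas; the route and base URL are assumptions following OpenAI conventions, not read from this diff.

```python
# Hedged sketch: a search payload shaped by OpenaiSearchVectorStoreRequest and
# result handling shaped by VectorStoreSearchResponse(Page). Route and base URL
# are assumptions (OpenAI-compatible conventions).
import requests

BASE_URL = "http://localhost:8321"   # assumption: local Llama Stack server
VECTOR_STORE_ID = "vs_abc123"        # assumption: an existing vector store

search_body = {
    "query": "How do I rotate API keys?",
    "max_num_results": 5,                          # 1 to 50, default 10 per the spec
    "search_mode": "hybrid",                       # "keyword", "vector", or "hybrid"
    "rewrite_query": False,
    "ranking_options": {"score_threshold": 0.25},  # drop low-relevance hits
}
page = requests.post(
    f"{BASE_URL}/v1/vector_stores/{VECTOR_STORE_ID}/search", json=search_body, timeout=30
).json()

for result in page["data"]:  # VectorStoreSearchResponse objects
    text = " ".join(part["text"] for part in result["content"])
    print(f"{result['filename']} (score {result['score']:.2f}): {text[:80]}")
```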
+ VectorStoreSearchResponsePage: + type: object + properties: + object: + type: string + default: vector_store.search_results.page + description: >- + Object type identifier for the search results page + search_query: + type: string + description: >- + The original search query that was executed + data: type: array items: - type: string - description: The attributes to save to the dataset. - dataset_id: - type: string - description: >- - The ID of the dataset to save the spans to. - max_depth: - type: integer - description: The maximum depth of the tree. - additionalProperties: false - required: - - attribute_filters - - attributes_to_save - - dataset_id - title: SaveSpansToDatasetRequest - ScoreRequest: - type: object - properties: - input_rows: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The rows to score. - scoring_functions: - type: object - additionalProperties: - oneOf: - - $ref: '#/components/schemas/ScoringFnParams' - - type: 'null' - description: >- - The scoring functions to use for the scoring. - additionalProperties: false - required: - - input_rows - - scoring_functions - title: ScoreRequest - ScoreResponse: - type: object - properties: - results: - type: object - additionalProperties: - $ref: '#/components/schemas/ScoringResult' - description: >- - A map of scoring function name to ScoringResult. - additionalProperties: false - required: - - results - title: ScoreResponse - description: The response from scoring. - ScoreBatchRequest: - type: object - properties: - dataset_id: - type: string - description: The ID of the dataset to score. - scoring_functions: - type: object - additionalProperties: - oneOf: - - $ref: '#/components/schemas/ScoringFnParams' - - type: 'null' - description: >- - The scoring functions to use for the scoring. - save_results_dataset: - type: boolean - description: >- - Whether to save the results to a dataset. - additionalProperties: false - required: - - dataset_id - - scoring_functions - - save_results_dataset - title: ScoreBatchRequest - ScoreBatchResponse: - type: object - properties: - dataset_id: - type: string - description: >- - (Optional) The identifier of the dataset that was scored - results: - type: object - additionalProperties: - $ref: '#/components/schemas/ScoringResult' - description: >- - A map of scoring function name to ScoringResult - additionalProperties: false - required: - - results - title: ScoreBatchResponse - description: >- - Response from batch scoring operations on datasets. - SetDefaultVersionRequest: - type: object - properties: - version: - type: integer - description: The version to set as default. 
- additionalProperties: false - required: - - version - title: SetDefaultVersionRequest - AlgorithmConfig: - oneOf: - - $ref: '#/components/schemas/LoraFinetuningConfig' - - $ref: '#/components/schemas/QATFinetuningConfig' - discriminator: - propertyName: type - mapping: - LoRA: '#/components/schemas/LoraFinetuningConfig' - QAT: '#/components/schemas/QATFinetuningConfig' - LoraFinetuningConfig: - type: object - properties: - type: - type: string - const: LoRA - default: LoRA - description: Algorithm type identifier, always "LoRA" - lora_attn_modules: - type: array - items: - type: string - description: >- - List of attention module names to apply LoRA to - apply_lora_to_mlp: - type: boolean - description: Whether to apply LoRA to MLP layers - apply_lora_to_output: - type: boolean - description: >- - Whether to apply LoRA to output projection layers - rank: - type: integer - description: >- - Rank of the LoRA adaptation (lower rank = fewer parameters) - alpha: - type: integer - description: >- - LoRA scaling parameter that controls adaptation strength - use_dora: + $ref: '#/components/schemas/VectorStoreSearchResponse' + description: List of search result objects + has_more: type: boolean default: false description: >- - (Optional) Whether to use DoRA (Weight-Decomposed Low-Rank Adaptation) - quantize_base: - type: boolean - default: false + Whether there are more results available beyond this page + next_page: + type: string description: >- - (Optional) Whether to quantize the base model weights + (Optional) Token for retrieving the next page of results additionalProperties: false required: - - type - - lora_attn_modules - - apply_lora_to_mlp - - apply_lora_to_output - - rank - - alpha - title: LoraFinetuningConfig + - object + - search_query + - data + - has_more + title: VectorStoreSearchResponsePage description: >- - Configuration for Low-Rank Adaptation (LoRA) fine-tuning. - QATFinetuningConfig: - type: object - properties: - type: - type: string - const: QAT - default: QAT - description: Algorithm type identifier, always "QAT" - quantizer_name: - type: string - description: >- - Name of the quantization algorithm to use - group_size: - type: integer - description: Size of groups for grouped quantization - additionalProperties: false - required: - - type - - quantizer_name - - group_size - title: QATFinetuningConfig - description: >- - Configuration for Quantization-Aware Training (QAT) fine-tuning. - SupervisedFineTuneRequest: - type: object - properties: - job_uuid: - type: string - description: The UUID of the job to create. - training_config: - $ref: '#/components/schemas/TrainingConfig' - description: The training configuration. - hyperparam_search_config: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The hyperparam search configuration. - logger_config: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: The logger configuration. - model: - type: string - description: The model to fine-tune. - checkpoint_dir: - type: string - description: The directory to save checkpoint(s) to. - algorithm_config: - $ref: '#/components/schemas/AlgorithmConfig' - description: The algorithm configuration. 
- additionalProperties: false - required: - - job_uuid - - training_config - - hyperparam_search_config - - logger_config - title: SupervisedFineTuneRequest - SyntheticDataGenerateRequest: - type: object - properties: - dialogs: - type: array - items: - $ref: '#/components/schemas/Message' - description: >- - List of conversation messages to use as input for synthetic data generation - filtering_function: - type: string - enum: - - none - - random - - top_k - - top_p - - top_k_top_p - - sigmoid - description: >- - Type of filtering to apply to generated synthetic data samples - model: - type: string - description: >- - (Optional) The identifier of the model to use. The model must be registered - with Llama Stack and available via the /models endpoint - additionalProperties: false - required: - - dialogs - - filtering_function - title: SyntheticDataGenerateRequest - SyntheticDataGenerationResponse: - type: object - properties: - synthetic_data: - type: array - items: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - List of generated synthetic data samples that passed the filtering criteria - statistics: - type: object - additionalProperties: - oneOf: - - type: 'null' - - type: boolean - - type: number - - type: string - - type: array - - type: object - description: >- - (Optional) Statistical information about the generation process and filtering - results - additionalProperties: false - required: - - synthetic_data - title: SyntheticDataGenerationResponse - description: >- - Response from the synthetic data generation. Batch of (prompt, response, score) - tuples that pass the threshold. - UpdatePromptRequest: - type: object - properties: - prompt: - type: string - description: The updated prompt text content. - version: - type: integer - description: >- - The current version of the prompt being updated. - variables: - type: array - items: - type: string - description: >- - Updated list of variable names that can be used in the prompt template. - set_as_default: - type: boolean - description: >- - Set the new version as the default (default=True). - additionalProperties: false - required: - - prompt - - version - - set_as_default - title: UpdatePromptRequest + Paginated response from searching a vector store. VersionInfo: type: object properties: @@ -14127,29 +9196,85 @@ security: tags: - name: Agents description: >- - Main functionalities provided by this API: + APIs for creating and interacting with agentic systems. - - Create agents with specific instructions and ability to use tools. - - Interactions with agents are grouped into sessions ("threads"), and each interaction - is called a "turn". + ## Responses API - - Agents can be provided with various tools (see the ToolGroups and ToolRuntime - APIs for more details). - - Agents can be provided with various shields (see the Safety API for more details). + The Responses API provides OpenAI-compatible functionality with enhanced capabilities + for dynamic, stateful interactions. - - Agents can also use Memory to retrieve information from knowledge bases. See - the RAG Tool and Vector IO APIs for more details. - x-displayName: >- - Agents API for creating and interacting with agentic systems. - - name: Benchmarks - - name: DatasetIO - - name: Datasets - - name: Eval - x-displayName: >- - Llama Stack Evaluation API for running evaluations on model and agent candidates. 
+ + > **✅ STABLE**: This API is production-ready with backward compatibility guarantees. + Recommended for production applications. + + + ### ✅ Supported Tools + + + The Responses API supports the following tool types: + + + - **`web_search`**: Search the web for current information and real-time data + + - **`file_search`**: Search through uploaded files and vector stores + - Supports dynamic `vector_store_ids` per call + - Compatible with OpenAI file search patterns + - **`function`**: Call custom functions with JSON schema validation + + - **`mcp_tool`**: Model Context Protocol integration + + + ### ✅ Supported Fields & Features + + + **Core Capabilities:** + + - **Dynamic Configuration**: Switch models, vector stores, and tools per request + without pre-configuration + + - **Conversation Branching**: Use `previous_response_id` to branch conversations + and explore different paths + + - **Rich Annotations**: Automatic file citations, URL citations, and container + file citations + + - **Status Tracking**: Monitor tool call execution status and handle failures + gracefully + + + ### 🚧 Work in Progress + + + - Full real-time response streaming support + + - `tool_choice` parameter + + - `max_tool_calls` parameter + + - Built-in tools (code interpreter, containers API) + + - Safety & guardrails + + - `reasoning` capabilities + + - `service_tier` + + - `logprobs` + + - `max_output_tokens` + + - `metadata` handling + + - `instructions` + + - `incomplete_details` + + - `background` + x-displayName: Agents - name: Files + description: '' - name: Inference description: >- This API provides the raw interface to the underlying models. Two kinds of models @@ -14163,37 +9288,45 @@ tags: Llama Stack Inference API for generating completions, chat completions, and embeddings. - name: Inspect + description: '' - name: Models - - name: PostTraining (Coming Soon) + description: '' - name: Prompts + description: '' x-displayName: >- Protocol for prompt management operations. - name: Providers + description: '' x-displayName: >- Providers API for inspecting, listing, and modifying providers and their configurations. - name: Safety + description: '' - name: Scoring + description: '' - name: ScoringFunctions + description: '' - name: Shields + description: '' - name: SyntheticDataGeneration (Coming Soon) + description: '' - name: Telemetry + description: '' - name: ToolGroups + description: '' - name: ToolRuntime + description: '' - name: VectorDBs + description: '' - name: VectorIO + description: '' x-tagGroups: - name: Operations tags: - Agents - - Benchmarks - - DatasetIO - - Datasets - - Eval - Files - Inference - Inspect - Models - - PostTraining (Coming Soon) - Prompts - Providers - Safety diff --git a/docs/supplementary/deprecated/agents-api.md b/docs/supplementary/deprecated/agents-api.md new file mode 100644 index 000000000..ddbf8f871 --- /dev/null +++ b/docs/supplementary/deprecated/agents-api.md @@ -0,0 +1,9 @@ +## Deprecated APIs + +> **⚠️ DEPRECATED**: These APIs are provided for migration reference and will be removed in future versions. Not recommended for new projects. 
+ +### Migration Guidance + +If you are using deprecated versions of the Agents or Responses APIs, please migrate to: + +- **Responses API**: Use the stable v1 Responses API endpoints diff --git a/docs/supplementary/experimental/agents-api.md b/docs/supplementary/experimental/agents-api.md new file mode 100644 index 000000000..9737b6aba --- /dev/null +++ b/docs/supplementary/experimental/agents-api.md @@ -0,0 +1,21 @@ +## Agents API (Experimental) + +> **🧪 EXPERIMENTAL**: This API is in preview and may change based on user feedback. Great for exploring new capabilities and providing feedback to influence the final design. + +Main functionalities provided by this API: + +- Create agents with specific instructions and ability to use tools. +- Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn". +- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details). +- Agents can be provided with various shields (see the Safety API for more details). +- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details. + +### 🧪 Feedback Welcome + +This API is actively being developed. We welcome feedback on: +- API design and usability +- Performance characteristics +- Missing features or capabilities +- Integration patterns + +**Provide Feedback**: [GitHub Discussions](https://github.com/llamastack/llama-stack/discussions) or [GitHub Issues](https://github.com/llamastack/llama-stack/issues) \ No newline at end of file diff --git a/docs/supplementary/stable/agents-api.md b/docs/supplementary/stable/agents-api.md new file mode 100644 index 000000000..e2011f7a7 --- /dev/null +++ b/docs/supplementary/stable/agents-api.md @@ -0,0 +1,40 @@ +## Responses API + +The Responses API provides OpenAI-compatible functionality with enhanced capabilities for dynamic, stateful interactions. + +> **✅ STABLE**: This API is production-ready with backward compatibility guarantees. Recommended for production applications. 
+ +### ✅ Supported Tools + +The Responses API supports the following tool types: + +- **`web_search`**: Search the web for current information and real-time data +- **`file_search`**: Search through uploaded files and vector stores + - Supports dynamic `vector_store_ids` per call + - Compatible with OpenAI file search patterns +- **`function`**: Call custom functions with JSON schema validation +- **`mcp_tool`**: Model Context Protocol integration + +### ✅ Supported Fields & Features + +**Core Capabilities:** +- **Dynamic Configuration**: Switch models, vector stores, and tools per request without pre-configuration +- **Conversation Branching**: Use `previous_response_id` to branch conversations and explore different paths +- **Rich Annotations**: Automatic file citations, URL citations, and container file citations +- **Status Tracking**: Monitor tool call execution status and handle failures gracefully + +### 🚧 Work in Progress + +- Full real-time response streaming support +- `tool_choice` parameter +- `max_tool_calls` parameter +- Built-in tools (code interpreter, containers API) +- Safety & guardrails +- `reasoning` capabilities +- `service_tier` +- `logprobs` +- `max_output_tokens` +- `metadata` handling +- `instructions` +- `incomplete_details` +- `background` \ No newline at end of file diff --git a/llama_stack/apis/agents/agents.py b/llama_stack/apis/agents/agents.py index f732dd1ed..97d80af59 100644 --- a/llama_stack/apis/agents/agents.py +++ b/llama_stack/apis/agents/agents.py @@ -472,20 +472,23 @@ class AgentStepResponse(BaseModel): @runtime_checkable class Agents(Protocol): - """Agents API for creating and interacting with agentic systems. + """Agents - Main functionalities provided by this API: - - Create agents with specific instructions and ability to use tools. - - Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn". - - Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details). - - Agents can be provided with various shields (see the Safety API for more details). - - Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details. - """ + APIs for creating and interacting with agentic systems.""" @webmethod( - route="/agents", method="POST", descriptive_name="create_agent", deprecated=True, level=LLAMA_STACK_API_V1 + route="/agents", + method="POST", + descriptive_name="create_agent", + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod( + route="/agents", + method="POST", + descriptive_name="create_agent", + level=LLAMA_STACK_API_V1ALPHA, ) - @webmethod(route="/agents", method="POST", descriptive_name="create_agent", level=LLAMA_STACK_API_V1ALPHA) async def create_agent( self, agent_config: AgentConfig, @@ -648,8 +651,17 @@ class Agents(Protocol): """ ... - @webmethod(route="/agents/{agent_id}/session/{session_id}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) - @webmethod(route="/agents/{agent_id}/session/{session_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA) + @webmethod( + route="/agents/{agent_id}/session/{session_id}", + method="GET", + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod( + route="/agents/{agent_id}/session/{session_id}", + method="GET", + level=LLAMA_STACK_API_V1ALPHA, + ) async def get_agents_session( self, session_id: str, @@ -666,9 +678,16 @@ class Agents(Protocol): ... 
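The decorator changes running through `agents.py` here all follow one pattern: each legacy route keeps its `/v1` registration, now flagged `deprecated=True`, and gains an identical registration at the `v1alpha` level. A condensed sketch of that pattern; the protocol class name is illustrative, while the decorator and version constants are the ones imported elsewhere in this diff.

```python
# Condensed sketch of the dual-registration pattern applied throughout this
# diff: same route and method, one deprecated /v1 entry plus one entry at the
# new stability level. The class name is illustrative.
from typing import Protocol

from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
from llama_stack.schema_utils import webmethod

class VersionedAgents(Protocol):
    @webmethod(route="/agents/{agent_id}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1)
    @webmethod(route="/agents/{agent_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
    async def get_agent(self, agent_id: str): ...
```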
@webmethod( - route="/agents/{agent_id}/session/{session_id}", method="DELETE", deprecated=True, level=LLAMA_STACK_API_V1 + route="/agents/{agent_id}/session/{session_id}", + method="DELETE", + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod( + route="/agents/{agent_id}/session/{session_id}", + method="DELETE", + level=LLAMA_STACK_API_V1ALPHA, ) - @webmethod(route="/agents/{agent_id}/session/{session_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA) async def delete_agents_session( self, session_id: str, @@ -681,7 +700,12 @@ class Agents(Protocol): """ ... - @webmethod(route="/agents/{agent_id}", method="DELETE", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod( + route="/agents/{agent_id}", + method="DELETE", + deprecated=True, + level=LLAMA_STACK_API_V1, + ) @webmethod(route="/agents/{agent_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA) async def delete_agent( self, @@ -704,7 +728,12 @@ class Agents(Protocol): """ ... - @webmethod(route="/agents/{agent_id}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod( + route="/agents/{agent_id}", + method="GET", + deprecated=True, + level=LLAMA_STACK_API_V1, + ) @webmethod(route="/agents/{agent_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA) async def get_agent(self, agent_id: str) -> Agent: """Describe an agent by its ID. @@ -714,7 +743,12 @@ class Agents(Protocol): """ ... - @webmethod(route="/agents/{agent_id}/sessions", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod( + route="/agents/{agent_id}/sessions", + method="GET", + deprecated=True, + level=LLAMA_STACK_API_V1, + ) @webmethod(route="/agents/{agent_id}/sessions", method="GET", level=LLAMA_STACK_API_V1ALPHA) async def list_agent_sessions( self, @@ -793,7 +827,11 @@ class Agents(Protocol): """ ... - @webmethod(route="/responses/{response_id}/input_items", method="GET", level=LLAMA_STACK_API_V1) + @webmethod( + route="/responses/{response_id}/input_items", + method="GET", + level=LLAMA_STACK_API_V1, + ) async def list_openai_response_input_items( self, response_id: str, diff --git a/llama_stack/apis/datasetio/datasetio.py b/llama_stack/apis/datasetio/datasetio.py index 27e5336bc..5b23c83d6 100644 --- a/llama_stack/apis/datasetio/datasetio.py +++ b/llama_stack/apis/datasetio/datasetio.py @@ -8,7 +8,7 @@ from typing import Any, Protocol, runtime_checkable from llama_stack.apis.common.responses import PaginatedResponse from llama_stack.apis.datasets import Dataset -from llama_stack.apis.version import LLAMA_STACK_API_V1 +from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1BETA from llama_stack.schema_utils import webmethod @@ -21,7 +21,8 @@ class DatasetIO(Protocol): # keeping for aligning with inference/safety, but this is not used dataset_store: DatasetStore - @webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1) + @webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1BETA) async def iterrows( self, dataset_id: str, @@ -45,7 +46,10 @@ class DatasetIO(Protocol): """ ... 
- @webmethod(route="/datasetio/append-rows/{dataset_id:path}", method="POST", level=LLAMA_STACK_API_V1) + @webmethod( + route="/datasetio/append-rows/{dataset_id:path}", method="POST", deprecated=True, level=LLAMA_STACK_API_V1 + ) + @webmethod(route="/datasetio/append-rows/{dataset_id:path}", method="POST", level=LLAMA_STACK_API_V1BETA) async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None: """Append rows to a dataset. diff --git a/llama_stack/apis/datasets/datasets.py b/llama_stack/apis/datasets/datasets.py index be0cbf09a..e46dfb6d4 100644 --- a/llama_stack/apis/datasets/datasets.py +++ b/llama_stack/apis/datasets/datasets.py @@ -10,7 +10,7 @@ from typing import Annotated, Any, Literal, Protocol from pydantic import BaseModel, Field from llama_stack.apis.resource import Resource, ResourceType -from llama_stack.apis.version import LLAMA_STACK_API_V1 +from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1BETA from llama_stack.schema_utils import json_schema_type, register_schema, webmethod @@ -146,7 +146,8 @@ class ListDatasetsResponse(BaseModel): class Datasets(Protocol): - @webmethod(route="/datasets", method="POST", level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets", method="POST", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets", method="POST", level=LLAMA_STACK_API_V1BETA) async def register_dataset( self, purpose: DatasetPurpose, @@ -215,7 +216,8 @@ class Datasets(Protocol): """ ... - @webmethod(route="/datasets/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets/{dataset_id:path}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1BETA) async def get_dataset( self, dataset_id: str, @@ -227,7 +229,8 @@ class Datasets(Protocol): """ ... - @webmethod(route="/datasets", method="GET", level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets", method="GET", level=LLAMA_STACK_API_V1BETA) async def list_datasets(self) -> ListDatasetsResponse: """List all datasets. @@ -235,7 +238,8 @@ class Datasets(Protocol): """ ... - @webmethod(route="/datasets/{dataset_id:path}", method="DELETE", level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets/{dataset_id:path}", method="DELETE", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod(route="/datasets/{dataset_id:path}", method="DELETE", level=LLAMA_STACK_API_V1BETA) async def unregister_dataset( self, dataset_id: str, diff --git a/llama_stack/apis/inference/inference.py b/llama_stack/apis/inference/inference.py index c50986813..5525e4597 100644 --- a/llama_stack/apis/inference/inference.py +++ b/llama_stack/apis/inference/inference.py @@ -1008,28 +1008,6 @@ class InferenceProvider(Protocol): model_store: ModelStore | None = None - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]: - """Generate a completion for the given content using the specified model. - - :param model_id: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint. - :param content: The content to generate a completion for. 
- :param sampling_params: (Optional) Parameters to control the sampling strategy. - :param response_format: (Optional) Grammar specification for guided (structured) decoding. - :param stream: (Optional) If True, generate an SSE event stream of the response. Defaults to False. - :param logprobs: (Optional) If specified, log probabilities for each token position will be returned. - :returns: If stream=False, returns a CompletionResponse with the full completion. - If stream=True, returns an SSE event stream of CompletionResponseStreamChunk. - """ - ... - async def chat_completion( self, model_id: str, diff --git a/llama_stack/apis/telemetry/telemetry.py b/llama_stack/apis/telemetry/telemetry.py index 29dd23989..0e772da6a 100644 --- a/llama_stack/apis/telemetry/telemetry.py +++ b/llama_stack/apis/telemetry/telemetry.py @@ -16,7 +16,7 @@ from typing import ( from pydantic import BaseModel, Field -from llama_stack.apis.version import LLAMA_STACK_API_V1 +from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA from llama_stack.models.llama.datatypes import Primitive from llama_stack.schema_utils import json_schema_type, register_schema, webmethod @@ -426,7 +426,14 @@ class Telemetry(Protocol): """ ... - @webmethod(route="/telemetry/traces", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1) + @webmethod( + route="/telemetry/traces", + method="POST", + required_scope=REQUIRED_SCOPE, + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod(route="/telemetry/traces", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1ALPHA) async def query_traces( self, attribute_filters: list[QueryCondition] | None = None, @@ -445,7 +452,17 @@ class Telemetry(Protocol): ... @webmethod( - route="/telemetry/traces/{trace_id:path}", method="GET", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1 + route="/telemetry/traces/{trace_id:path}", + method="GET", + required_scope=REQUIRED_SCOPE, + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod( + route="/telemetry/traces/{trace_id:path}", + method="GET", + required_scope=REQUIRED_SCOPE, + level=LLAMA_STACK_API_V1ALPHA, ) async def get_trace(self, trace_id: str) -> Trace: """Get a trace by its ID. @@ -459,8 +476,15 @@ class Telemetry(Protocol): route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}", method="GET", required_scope=REQUIRED_SCOPE, + deprecated=True, level=LLAMA_STACK_API_V1, ) + @webmethod( + route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}", + method="GET", + required_scope=REQUIRED_SCOPE, + level=LLAMA_STACK_API_V1ALPHA, + ) async def get_span(self, trace_id: str, span_id: str) -> Span: """Get a span by its ID. @@ -473,9 +497,16 @@ class Telemetry(Protocol): @webmethod( route="/telemetry/spans/{span_id:path}/tree", method="POST", + deprecated=True, required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1, ) + @webmethod( + route="/telemetry/spans/{span_id:path}/tree", + method="POST", + required_scope=REQUIRED_SCOPE, + level=LLAMA_STACK_API_V1ALPHA, + ) async def get_span_tree( self, span_id: str, @@ -491,7 +522,14 @@ class Telemetry(Protocol): """ ... 
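The `completion()` method removed from `InferenceProvider` above, read together with the `openai_completion` signatures added further down in this diff, implies the migration path for callers. A hedged sketch of the replacement call; the model id and the `.choices[0].text` access are assumptions following OpenAI completion conventions, not taken from this diff.

```python
# Hedged sketch: legacy completion() (removed by this diff) replaced with the
# OpenAI-compatible openai_completion(), using only parameter names that appear
# in the signatures added below. Model id and response access are assumptions.
from llama_stack.apis.inference import InferenceProvider

async def generate(inference: InferenceProvider) -> str:
    # Previously: await inference.completion(model_id=..., content=..., stream=False)
    completion = await inference.openai_completion(
        model="meta-llama/Llama-3.1-8B-Instruct",  # assumption: example model id
        prompt="Write a haiku about the ocean.",
        max_tokens=64,
        temperature=0.7,
    )
    # Assumption: OpenAICompletion follows the OpenAI completions shape.
    return completion.choices[0].text
```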
- @webmethod(route="/telemetry/spans", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1) + @webmethod( + route="/telemetry/spans", + method="POST", + required_scope=REQUIRED_SCOPE, + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod(route="/telemetry/spans", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1ALPHA) async def query_spans( self, attribute_filters: list[QueryCondition], @@ -507,7 +545,8 @@ class Telemetry(Protocol): """ ... - @webmethod(route="/telemetry/spans/export", method="POST", level=LLAMA_STACK_API_V1) + @webmethod(route="/telemetry/spans/export", method="POST", deprecated=True, level=LLAMA_STACK_API_V1) + @webmethod(route="/telemetry/spans/export", method="POST", level=LLAMA_STACK_API_V1ALPHA) async def save_spans_to_dataset( self, attribute_filters: list[QueryCondition], @@ -525,7 +564,17 @@ class Telemetry(Protocol): ... @webmethod( - route="/telemetry/metrics/{metric_name}", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1 + route="/telemetry/metrics/{metric_name}", + method="POST", + required_scope=REQUIRED_SCOPE, + deprecated=True, + level=LLAMA_STACK_API_V1, + ) + @webmethod( + route="/telemetry/metrics/{metric_name}", + method="POST", + required_scope=REQUIRED_SCOPE, + level=LLAMA_STACK_API_V1ALPHA, ) async def query_metrics( self, diff --git a/llama_stack/core/routers/inference.py b/llama_stack/core/routers/inference.py index 80f47fb5d..4b004a82c 100644 --- a/llama_stack/core/routers/inference.py +++ b/llama_stack/core/routers/inference.py @@ -267,47 +267,6 @@ class InferenceRouter(Inference): ) return response - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - logger.debug( - f"InferenceRouter.completion: {model_id=}, {stream=}, {content=}, {sampling_params=}, {response_format=}", - ) - model = await self._get_model(model_id, ModelType.llm) - provider = await self.routing_table.get_provider_impl(model_id) - params = dict( - model_id=model_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - - prompt_tokens = await self._count_tokens(content) - response = await provider.completion(**params) - if stream: - return self.stream_tokens_and_compute_metrics( - response=response, - prompt_tokens=prompt_tokens, - model=model, - ) - - metrics = await self.count_tokens_and_compute_metrics( - response=response, prompt_tokens=prompt_tokens, model=model - ) - response.metrics = metrics if response.metrics is None else response.metrics + metrics - - return response - async def openai_completion( self, model: str, diff --git a/llama_stack/log.py b/llama_stack/log.py index cc4c9d4cf..2a11516fa 100644 --- a/llama_stack/log.py +++ b/llama_stack/log.py @@ -247,7 +247,16 @@ def get_logger( _category_levels.update(parse_yaml_config(config)) logger = logging.getLogger(name) - logger.setLevel(_category_levels.get(category, DEFAULT_LOG_LEVEL)) + if category in _category_levels: + log_level = _category_levels[category] + else: + root_category = category.split("::")[0] + if root_category in _category_levels: + log_level = _category_levels[root_category] + else: + log_level = _category_levels.get("root", DEFAULT_LOG_LEVEL) + 
logging.warning(f"Unknown logging category: {category}. Falling back to default 'root' level: {log_level}") + logger.setLevel(log_level) return logging.LoggerAdapter(logger, {"category": category}) diff --git a/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py b/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py index 4d5b5bda6..7eaf08e13 100644 --- a/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py +++ b/llama_stack/providers/inline/agents/meta_reference/responses/streaming.py @@ -355,8 +355,11 @@ class StreamingResponseOrchestrator: # Emit arguments.done events for completed tool calls (differentiate between MCP and function calls) for tool_call_index in sorted(chat_response_tool_calls.keys()): + tool_call = chat_response_tool_calls[tool_call_index] + # Ensure that arguments, if sent back to the inference provider, are not None + tool_call.function.arguments = tool_call.function.arguments or "{}" tool_call_item_id = tool_call_item_ids[tool_call_index] - final_arguments = chat_response_tool_calls[tool_call_index].function.arguments or "" + final_arguments = tool_call.function.arguments tool_call_name = chat_response_tool_calls[tool_call_index].function.name # Check if this is an MCP tool call diff --git a/llama_stack/providers/inline/inference/meta_reference/inference.py b/llama_stack/providers/inline/inference/meta_reference/inference.py index f9e295014..db022d65d 100644 --- a/llama_stack/providers/inline/inference/meta_reference/inference.py +++ b/llama_stack/providers/inline/inference/meta_reference/inference.py @@ -24,11 +24,7 @@ from llama_stack.apis.inference import ( ChatCompletionResponseEventType, ChatCompletionResponseStreamChunk, CompletionMessage, - CompletionRequest, - CompletionResponse, - CompletionResponseStreamChunk, InferenceProvider, - InterleavedContent, LogProbConfig, Message, ResponseFormat, @@ -59,10 +55,8 @@ from llama_stack.providers.utils.inference.model_registry import ( ) from llama_stack.providers.utils.inference.openai_compat import ( OpenAIChatCompletionToLlamaStackMixin, - OpenAICompletionToLlamaStackMixin, ) from llama_stack.providers.utils.inference.prompt_adapter import ( - augment_content_with_response_format_prompt, chat_completion_request_to_messages, convert_request_to_raw, ) @@ -82,7 +76,6 @@ def llama_builder_fn(config: MetaReferenceInferenceConfig, model_id: str, llama_ class MetaReferenceInferenceImpl( - OpenAICompletionToLlamaStackMixin, OpenAIChatCompletionToLlamaStackMixin, SentenceTransformerEmbeddingMixin, InferenceProvider, @@ -100,6 +93,9 @@ class MetaReferenceInferenceImpl( if self.config.create_distributed_process_group: self.generator.stop() + async def openai_completion(self, *args, **kwargs): + raise NotImplementedError("OpenAI completion not supported by meta reference provider") + async def should_refresh_models(self) -> bool: return False @@ -165,11 +161,6 @@ class MetaReferenceInferenceImpl( self.llama_model = llama_model log.info("Warming up...") - await self.completion( - model_id=model_id, - content="Hello, world!", - sampling_params=SamplingParams(max_tokens=10), - ) await self.chat_completion( model_id=model_id, messages=[UserMessage(content="Hi how are you?")], @@ -185,137 +176,6 @@ class MetaReferenceInferenceImpl( elif request.model != self.model_id: raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}") - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: 
SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | CompletionResponseStreamChunk: - if sampling_params is None: - sampling_params = SamplingParams() - if logprobs: - assert logprobs.top_k == 1, f"Unexpected top_k={logprobs.top_k}" - - content = augment_content_with_response_format_prompt(response_format, content) - request = CompletionRequest( - model=model_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - self.check_model(request) - request = await convert_request_to_raw(request) - - if request.stream: - return self._stream_completion(request) - else: - results = await self._nonstream_completion([request]) - return results[0] - - async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: - tokenizer = self.generator.formatter.tokenizer - - def impl(): - stop_reason = None - - for token_results in self.generator.completion([request]): - token_result = token_results[0] - if token_result.token == tokenizer.eot_id: - stop_reason = StopReason.end_of_turn - text = "" - elif token_result.token == tokenizer.eom_id: - stop_reason = StopReason.end_of_message - text = "" - else: - text = token_result.text - - logprobs = None - if stop_reason is None: - if request.logprobs: - assert len(token_result.logprobs) == 1 - - logprobs = [TokenLogProbs(logprobs_by_token={token_result.text: token_result.logprobs[0]})] - - yield CompletionResponseStreamChunk( - delta=text, - stop_reason=stop_reason, - logprobs=logprobs if request.logprobs else None, - ) - - if stop_reason is None: - yield CompletionResponseStreamChunk( - delta="", - stop_reason=StopReason.out_of_tokens, - ) - - if self.config.create_distributed_process_group: - async with SEMAPHORE: - for x in impl(): - yield x - else: - for x in impl(): - yield x - - async def _nonstream_completion(self, request_batch: list[CompletionRequest]) -> list[CompletionResponse]: - tokenizer = self.generator.formatter.tokenizer - - first_request = request_batch[0] - - class ItemState(BaseModel): - tokens: list[int] = [] - logprobs: list[TokenLogProbs] = [] - stop_reason: StopReason | None = None - finished: bool = False - - def impl(): - states = [ItemState() for _ in request_batch] - - results = [] - for token_results in self.generator.completion(request_batch): - for result in token_results: - idx = result.batch_idx - state = states[idx] - if state.finished or result.ignore_token: - continue - - state.finished = result.finished - if first_request.logprobs: - state.logprobs.append(TokenLogProbs(logprobs_by_token={result.text: result.logprobs[0]})) - - state.tokens.append(result.token) - if result.token == tokenizer.eot_id: - state.stop_reason = StopReason.end_of_turn - elif result.token == tokenizer.eom_id: - state.stop_reason = StopReason.end_of_message - - for state in states: - if state.stop_reason is None: - state.stop_reason = StopReason.out_of_tokens - - if state.tokens[-1] in self.generator.formatter.tokenizer.stop_tokens: - state.tokens = state.tokens[:-1] - content = self.generator.formatter.tokenizer.decode(state.tokens) - results.append( - CompletionResponse( - content=content, - stop_reason=state.stop_reason, - logprobs=state.logprobs if first_request.logprobs else None, - ) - ) - - return results - - if self.config.create_distributed_process_group: - async with SEMAPHORE: - return impl() - else: - return impl() - async 
def chat_completion( self, model_id: str, diff --git a/llama_stack/providers/inline/inference/meta_reference/model_parallel.py b/llama_stack/providers/inline/inference/meta_reference/model_parallel.py index 9031d36b3..9d0295d65 100644 --- a/llama_stack/providers/inline/inference/meta_reference/model_parallel.py +++ b/llama_stack/providers/inline/inference/meta_reference/model_parallel.py @@ -27,8 +27,6 @@ class ModelRunner: def __call__(self, task: Any): if task[0] == "chat_completion": return self.llama.chat_completion(task[1]) - elif task[0] == "completion": - return self.llama.completion(task[1]) else: raise ValueError(f"Unexpected task type {task[0]}") diff --git a/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py b/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py index 34665b63e..cd682dca6 100644 --- a/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py +++ b/llama_stack/providers/inline/inference/sentence_transformers/sentence_transformers.py @@ -5,9 +5,9 @@ # the root directory of this source tree. from collections.abc import AsyncGenerator +from typing import Any from llama_stack.apis.inference import ( - CompletionResponse, InferenceProvider, LogProbConfig, Message, @@ -18,6 +18,7 @@ from llama_stack.apis.inference import ( ToolDefinition, ToolPromptFormat, ) +from llama_stack.apis.inference.inference import OpenAICompletion from llama_stack.apis.models import ModelType from llama_stack.log import get_logger from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate @@ -26,7 +27,6 @@ from llama_stack.providers.utils.inference.embedding_mixin import ( ) from llama_stack.providers.utils.inference.openai_compat import ( OpenAIChatCompletionToLlamaStackMixin, - OpenAICompletionToLlamaStackMixin, ) from .config import SentenceTransformersInferenceConfig @@ -36,7 +36,6 @@ log = get_logger(name=__name__, category="inference") class SentenceTransformersInferenceImpl( OpenAIChatCompletionToLlamaStackMixin, - OpenAICompletionToLlamaStackMixin, SentenceTransformerEmbeddingMixin, InferenceProvider, ModelsProtocolPrivate, @@ -74,17 +73,6 @@ class SentenceTransformersInferenceImpl( async def unregister_model(self, model_id: str) -> None: pass - async def completion( - self, - model_id: str, - content: str, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | AsyncGenerator: - raise ValueError("Sentence transformers don't support completion") - async def chat_completion( self, model_id: str, @@ -99,3 +87,31 @@ class SentenceTransformersInferenceImpl( tool_config: ToolConfig | None = None, ) -> AsyncGenerator: raise ValueError("Sentence transformers don't support chat completion") + + async def openai_completion( + self, + # Standard OpenAI completion parameters + model: str, + prompt: str | list[str] | list[int] | list[list[int]], + best_of: int | None = None, + echo: bool | None = None, + frequency_penalty: float | None = None, + logit_bias: dict[str, float] | None = None, + logprobs: bool | None = None, + max_tokens: int | None = None, + n: int | None = None, + presence_penalty: float | None = None, + seed: int | None = None, + stop: str | list[str] | None = None, + stream: bool | None = None, + stream_options: dict[str, Any] | None = None, + temperature: float | None = None, + top_p: float | None = None, + user: str | None = None, 
+ # vLLM-specific parameters + guided_choice: list[str] | None = None, + prompt_logprobs: int | None = None, + # for fill-in-the-middle type completion + suffix: str | None = None, + ) -> OpenAICompletion: + raise NotImplementedError("OpenAI completion not supported by sentence transformers provider") diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py index 340215a53..d60efe828 100644 --- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py +++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/llm_as_judge_scoring_fn.py @@ -6,7 +6,7 @@ import re from typing import Any -from llama_stack.apis.inference import Inference, UserMessage +from llama_stack.apis.inference import Inference from llama_stack.apis.scoring import ScoringResultRow from llama_stack.apis.scoring_functions import ScoringFnParams from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn @@ -55,15 +55,16 @@ class LlmAsJudgeScoringFn(RegisteredBaseScoringFn): generated_answer=generated_answer, ) - judge_response = await self.inference_api.chat_completion( - model_id=fn_def.params.judge_model, + judge_response = await self.inference_api.openai_chat_completion( + model=fn_def.params.judge_model, messages=[ - UserMessage( - content=judge_input_msg, - ), + { + "role": "user", + "content": judge_input_msg, + } ], ) - content = judge_response.completion_message.content + content = judge_response.choices[0].message.content rating_regexes = fn_def.params.judge_score_regexes judge_rating = None diff --git a/llama_stack/providers/remote/inference/bedrock/bedrock.py b/llama_stack/providers/remote/inference/bedrock/bedrock.py index 2206aa641..f87a5b5e2 100644 --- a/llama_stack/providers/remote/inference/bedrock/bedrock.py +++ b/llama_stack/providers/remote/inference/bedrock/bedrock.py @@ -6,12 +6,10 @@ import json from collections.abc import AsyncGenerator, AsyncIterator +from typing import Any from botocore.client import BaseClient -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, @@ -27,6 +25,7 @@ from llama_stack.apis.inference import ( ToolDefinition, ToolPromptFormat, ) +from llama_stack.apis.inference.inference import OpenAICompletion from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig from llama_stack.providers.utils.bedrock.client import create_bedrock_client from llama_stack.providers.utils.inference.model_registry import ( @@ -36,7 +35,6 @@ from llama_stack.providers.utils.inference.openai_compat import ( OpenAIChatCompletionToLlamaStackMixin, OpenAICompatCompletionChoice, OpenAICompatCompletionResponse, - OpenAICompletionToLlamaStackMixin, get_sampling_strategy_options, process_chat_completion_response, process_chat_completion_stream_response, @@ -89,7 +87,6 @@ class BedrockInferenceAdapter( ModelRegistryHelper, Inference, OpenAIChatCompletionToLlamaStackMixin, - OpenAICompletionToLlamaStackMixin, ): def __init__(self, config: BedrockConfig) -> None: ModelRegistryHelper.__init__(self, model_entries=MODEL_ENTRIES) @@ -109,17 +106,6 @@ class BedrockInferenceAdapter( if self._client is not None: self._client.close() - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: 
ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - raise NotImplementedError() - async def chat_completion( self, model_id: str, @@ -221,3 +207,31 @@ class BedrockInferenceAdapter( user: str | None = None, ) -> OpenAIEmbeddingsResponse: raise NotImplementedError() + + async def openai_completion( + self, + # Standard OpenAI completion parameters + model: str, + prompt: str | list[str] | list[int] | list[list[int]], + best_of: int | None = None, + echo: bool | None = None, + frequency_penalty: float | None = None, + logit_bias: dict[str, float] | None = None, + logprobs: bool | None = None, + max_tokens: int | None = None, + n: int | None = None, + presence_penalty: float | None = None, + seed: int | None = None, + stop: str | list[str] | None = None, + stream: bool | None = None, + stream_options: dict[str, Any] | None = None, + temperature: float | None = None, + top_p: float | None = None, + user: str | None = None, + # vLLM-specific parameters + guided_choice: list[str] | None = None, + prompt_logprobs: int | None = None, + # for fill-in-the-middle type completion + suffix: str | None = None, + ) -> OpenAICompletion: + raise NotImplementedError("OpenAI completion not supported by the Bedrock provider") diff --git a/llama_stack/providers/remote/inference/cerebras/cerebras.py b/llama_stack/providers/remote/inference/cerebras/cerebras.py index 6be39fa5d..95da71de8 100644 --- a/llama_stack/providers/remote/inference/cerebras/cerebras.py +++ b/llama_stack/providers/remote/inference/cerebras/cerebras.py @@ -9,9 +9,6 @@ from urllib.parse import urljoin from cerebras.cloud.sdk import AsyncCerebras -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, CompletionRequest, @@ -35,8 +32,6 @@ from llama_stack.providers.utils.inference.openai_compat import ( get_sampling_options, process_chat_completion_response, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( @@ -73,48 +68,6 @@ class CerebrasInferenceAdapter( async def shutdown(self) -> None: pass - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self.model_store.get_model(model_id) - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion( - request, - ) - else: - return await self._nonstream_completion(request) - - async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: - params = await self._get_params(request) - - r = await self._cerebras_client.completions.create(**params) - - return process_completion_response(r) - - async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: - params = await self._get_params(request) - - stream = await self._cerebras_client.completions.create(**params) - - async for chunk in 
process_completion_stream_response(stream): - yield chunk - async def chat_completion( self, model_id: str, diff --git a/llama_stack/providers/remote/inference/databricks/databricks.py b/llama_stack/providers/remote/inference/databricks/databricks.py index d85b477f5..cd5dfb40d 100644 --- a/llama_stack/providers/remote/inference/databricks/databricks.py +++ b/llama_stack/providers/remote/inference/databricks/databricks.py @@ -9,14 +9,9 @@ from typing import Any from databricks.sdk import WorkspaceClient -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionResponse, ChatCompletionResponseStreamChunk, - CompletionResponse, - CompletionResponseStreamChunk, Inference, LogProbConfig, Message, @@ -63,17 +58,6 @@ class DatabricksInferenceAdapter( async def shutdown(self) -> None: pass - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]: - raise NotImplementedError() - async def openai_completion( self, model: str, diff --git a/llama_stack/providers/remote/inference/fireworks/fireworks.py b/llama_stack/providers/remote/inference/fireworks/fireworks.py index ed4b56fad..dcc9e240b 100644 --- a/llama_stack/providers/remote/inference/fireworks/fireworks.py +++ b/llama_stack/providers/remote/inference/fireworks/fireworks.py @@ -8,14 +8,9 @@ from collections.abc import AsyncGenerator from fireworks.client import Fireworks -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, - CompletionRequest, - CompletionResponse, Inference, LogProbConfig, Message, @@ -37,13 +32,10 @@ from llama_stack.providers.utils.inference.openai_compat import ( get_sampling_options, process_chat_completion_response, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_prompt, - completion_request_to_prompt, request_has_media, ) @@ -94,79 +86,6 @@ class FireworksInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Nee return prompt[len("<|begin_of_text|>") :] return prompt - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self.model_store.get_model(model_id) - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion(request) - else: - return await self._nonstream_completion(request) - - async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: - params = await self._get_params(request) - r = await self._get_client().completion.acreate(**params) - return process_completion_response(r) - - async def _stream_completion(self, request: 
CompletionRequest) -> AsyncGenerator: - params = await self._get_params(request) - - # Wrapper for async generator similar - async def _to_async_generator(): - stream = self._get_client().completion.create(**params) - for chunk in stream: - yield chunk - - stream = _to_async_generator() - async for chunk in process_completion_stream_response(stream): - yield chunk - - def _build_options( - self, - sampling_params: SamplingParams | None, - fmt: ResponseFormat, - logprobs: LogProbConfig | None, - ) -> dict: - options = get_sampling_options(sampling_params) - options.setdefault("max_tokens", 512) - - if fmt: - if fmt.type == ResponseFormatType.json_schema.value: - options["response_format"] = { - "type": "json_object", - "schema": fmt.json_schema, - } - elif fmt.type == ResponseFormatType.grammar.value: - options["response_format"] = { - "type": "grammar", - "grammar": fmt.bnf, - } - else: - raise ValueError(f"Unknown response format {fmt.type}") - - if logprobs and logprobs.top_k: - options["logprobs"] = logprobs.top_k - if options["logprobs"] <= 0 or options["logprobs"] >= 5: - raise ValueError("Required range: 0 < top_k < 5") - - return options - async def chat_completion( self, model_id: str, @@ -222,22 +141,46 @@ class FireworksInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Nee async for chunk in process_chat_completion_stream_response(stream, request): yield chunk - async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict: + def _build_options( + self, + sampling_params: SamplingParams | None, + fmt: ResponseFormat | None, + logprobs: LogProbConfig | None, + ) -> dict: + options = get_sampling_options(sampling_params) + options.setdefault("max_tokens", 512) + + if fmt: + if fmt.type == ResponseFormatType.json_schema.value: + options["response_format"] = { + "type": "json_object", + "schema": fmt.json_schema, + } + elif fmt.type == ResponseFormatType.grammar.value: + options["response_format"] = { + "type": "grammar", + "grammar": fmt.bnf, + } + else: + raise ValueError(f"Unknown response format {fmt.type}") + + if logprobs and logprobs.top_k: + options["logprobs"] = logprobs.top_k + if options["logprobs"] <= 0 or options["logprobs"] >= 5: + raise ValueError("Required range: 0 < top_k < 5") + + return options + + async def _get_params(self, request: ChatCompletionRequest) -> dict: input_dict = {} media_present = request_has_media(request) llama_model = self.get_llama_model(request.model) - if isinstance(request, ChatCompletionRequest): - # TODO: tools are never added to the request, so we need to add them here - if media_present or not llama_model: - input_dict["messages"] = [ - await convert_message_to_openai_dict(m, download=True) for m in request.messages - ] - else: - input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model) + # TODO: tools are never added to the request, so we need to add them here + if media_present or not llama_model: + input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages] else: - assert not media_present, "Fireworks does not support media for Completion requests" - input_dict["prompt"] = await completion_request_to_prompt(request) + input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model) # Fireworks always prepends with BOS if "prompt" in input_dict: diff --git a/llama_stack/providers/remote/inference/nvidia/nvidia.py b/llama_stack/providers/remote/inference/nvidia/nvidia.py index a31981adb..8619b6b68 100644 
--- a/llama_stack/providers/remote/inference/nvidia/nvidia.py +++ b/llama_stack/providers/remote/inference/nvidia/nvidia.py @@ -9,16 +9,10 @@ from collections.abc import AsyncIterator from openai import NOT_GIVEN, APIConnectionError -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChunk, - CompletionRequest, - CompletionResponse, - CompletionResponseStreamChunk, Inference, LogProbConfig, Message, @@ -37,14 +31,10 @@ from llama_stack.providers.utils.inference.openai_compat import ( convert_openai_chat_completion_stream, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin -from llama_stack.providers.utils.inference.prompt_adapter import content_has_media from . import NVIDIAConfig from .openai_utils import ( convert_chat_completion_request, - convert_completion_request, - convert_openai_completion_choice, - convert_openai_completion_stream, ) from .utils import _is_nvidia_hosted @@ -109,48 +99,6 @@ class NVIDIAInferenceAdapter(OpenAIMixin, Inference): """ return f"{self._config.url}/v1" if self._config.append_api_version else self._config.url - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]: - if sampling_params is None: - sampling_params = SamplingParams() - if content_has_media(content): - raise NotImplementedError("Media is not supported") - - # ToDo: check health of NeMo endpoints and enable this - # removing this health check as NeMo customizer endpoint health check is returning 404 - # await check_health(self._config) # this raises errors - - provider_model_id = await self._get_provider_model_id(model_id) - request = convert_completion_request( - request=CompletionRequest( - model=provider_model_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ), - n=1, - ) - - try: - response = await self.client.completions.create(**request) - except APIConnectionError as e: - raise ConnectionError(f"Failed to connect to NVIDIA NIM at {self._config.url}: {e}") from e - - if stream: - return convert_openai_completion_stream(response) - else: - # we pass n=1 to get only one completion - return convert_openai_completion_choice(response.choices[0]) - async def openai_embeddings( self, model: str, diff --git a/llama_stack/providers/remote/inference/ollama/ollama.py b/llama_stack/providers/remote/inference/ollama/ollama.py index 16b104fb5..85ad62f9a 100644 --- a/llama_stack/providers/remote/inference/ollama/ollama.py +++ b/llama_stack/providers/remote/inference/ollama/ollama.py @@ -13,7 +13,6 @@ from ollama import AsyncClient as AsyncOllamaClient from llama_stack.apis.common.content_types import ( ImageContentItem, - InterleavedContent, TextContentItem, ) from llama_stack.apis.common.errors import UnsupportedModelError @@ -21,9 +20,6 @@ from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, ChatCompletionResponseStreamChunk, - CompletionRequest, - CompletionResponse, - CompletionResponseStreamChunk, GrammarResponseFormat, InferenceProvider, JsonSchemaResponseFormat, @@ -55,13 +51,10 @@ from llama_stack.providers.utils.inference.openai_compat import ( 
get_sampling_options, process_chat_completion_response, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_prompt, - completion_request_to_prompt, convert_image_content_to_url, request_has_media, ) @@ -168,67 +161,6 @@ class OllamaInferenceAdapter( raise ValueError("Model store not set") return await self.model_store.get_model(model_id) - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | AsyncGenerator[CompletionResponseStreamChunk, None]: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self._get_model(model_id) - if model.provider_resource_id is None: - raise ValueError(f"Model {model_id} has no provider_resource_id set") - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion(request) - else: - return await self._nonstream_completion(request) - - async def _stream_completion( - self, request: CompletionRequest - ) -> AsyncGenerator[CompletionResponseStreamChunk, None]: - params = await self._get_params(request) - - async def _generate_and_convert_to_openai_compat(): - s = await self.ollama_client.generate(**params) - async for chunk in s: - choice = OpenAICompatCompletionChoice( - finish_reason=chunk["done_reason"] if chunk["done"] else None, - text=chunk["response"], - ) - yield OpenAICompatCompletionResponse( - choices=[choice], - ) - - stream = _generate_and_convert_to_openai_compat() - async for chunk in process_completion_stream_response(stream): - yield chunk - - async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: - params = await self._get_params(request) - r = await self.ollama_client.generate(**params) - - choice = OpenAICompatCompletionChoice( - finish_reason=r["done_reason"] if r["done"] else None, - text=r["response"], - ) - response = OpenAICompatCompletionResponse( - choices=[choice], - ) - - return process_completion_response(response) - async def chat_completion( self, model_id: str, @@ -262,7 +194,7 @@ class OllamaInferenceAdapter( else: return await self._nonstream_chat_completion(request) - async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict: + async def _get_params(self, request: ChatCompletionRequest) -> dict: sampling_options = get_sampling_options(request.sampling_params) # This is needed since the Ollama API expects num_predict to be set # for early truncation instead of max_tokens. 
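Across the provider adapters in this patch the legacy `completion()` path is removed, and callers are expected to use the OpenAI-compatible methods instead, as the `llm_as_judge_scoring_fn.py` hunk earlier in this patch shows. The sketch below restates that call-pattern change in one place; it is illustrative only, and `inference_api`, `judge_model`, and `judge_input_msg` are placeholder names, not part of this diff.

```python
# Minimal sketch of the call-pattern change, assuming `inference_api` is any
# object implementing the Inference protocol and `judge_model` is a registered
# model id; all three names below are placeholders.

async def score_with_judge(inference_api, judge_model: str, judge_input_msg: str) -> str:
    # Old style (removed in this patch): chat_completion() took UserMessage
    # objects and the result exposed `completion_message.content`.
    #
    # New style: the OpenAI-compatible method takes plain dict messages and
    # returns an OpenAI-shaped response with a `choices` list.
    response = await inference_api.openai_chat_completion(
        model=judge_model,
        messages=[{"role": "user", "content": judge_input_msg}],
    )
    return response.choices[0].message.content
```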
@@ -272,21 +204,16 @@ class OllamaInferenceAdapter( input_dict: dict[str, Any] = {} media_present = request_has_media(request) llama_model = self.get_llama_model(request.model) - if isinstance(request, ChatCompletionRequest): - if media_present or not llama_model: - contents = [await convert_message_to_openai_dict_for_ollama(m) for m in request.messages] - # flatten the list of lists - input_dict["messages"] = [item for sublist in contents for item in sublist] - else: - input_dict["raw"] = True - input_dict["prompt"] = await chat_completion_request_to_prompt( - request, - llama_model, - ) + if media_present or not llama_model: + contents = [await convert_message_to_openai_dict_for_ollama(m) for m in request.messages] + # flatten the list of lists + input_dict["messages"] = [item for sublist in contents for item in sublist] else: - assert not media_present, "Ollama does not support media for Completion requests" - input_dict["prompt"] = await completion_request_to_prompt(request) input_dict["raw"] = True + input_dict["prompt"] = await chat_completion_request_to_prompt( + request, + llama_model, + ) if fmt := request.response_format: if isinstance(fmt, JsonSchemaResponseFormat): diff --git a/llama_stack/providers/remote/inference/passthrough/passthrough.py b/llama_stack/providers/remote/inference/passthrough/passthrough.py index ae482b7b0..3ac45e949 100644 --- a/llama_stack/providers/remote/inference/passthrough/passthrough.py +++ b/llama_stack/providers/remote/inference/passthrough/passthrough.py @@ -9,7 +9,6 @@ from typing import Any from llama_stack_client import AsyncLlamaStackClient -from llama_stack.apis.common.content_types import InterleavedContent from llama_stack.apis.inference import ( ChatCompletionResponse, ChatCompletionResponseStreamChunk, @@ -86,37 +85,6 @@ class PassthroughInferenceAdapter(Inference): provider_data=provider_data, ) - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - client = self._get_client() - model = await self.model_store.get_model(model_id) - - request_params = { - "model_id": model.provider_resource_id, - "content": content, - "sampling_params": sampling_params, - "response_format": response_format, - "stream": stream, - "logprobs": logprobs, - } - - request_params = {key: value for key, value in request_params.items() if value is not None} - - # cast everything to json dict - json_params = self.cast_value_to_json_dict(request_params) - - # only pass through the not None params - return await client.inference.completion(**json_params) - async def chat_completion( self, model_id: str, diff --git a/llama_stack/providers/remote/inference/tgi/tgi.py b/llama_stack/providers/remote/inference/tgi/tgi.py index e1632e4a0..27fc263a6 100644 --- a/llama_stack/providers/remote/inference/tgi/tgi.py +++ b/llama_stack/providers/remote/inference/tgi/tgi.py @@ -10,13 +10,9 @@ from collections.abc import AsyncGenerator from huggingface_hub import AsyncInferenceClient, HfApi from pydantic import SecretStr -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, - CompletionRequest, Inference, LogProbConfig, Message, @@ -44,13 +40,10 @@ from 
llama_stack.providers.utils.inference.openai_compat import ( get_sampling_options, process_chat_completion_response, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_model_input_info, - completion_request_to_prompt_model_input_info, ) from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig @@ -122,31 +115,6 @@ class _HfAdapter( async def unregister_model(self, model_id: str) -> None: pass - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self.model_store.get_model(model_id) - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion(request) - else: - return await self._nonstream_completion(request) - def _get_max_new_tokens(self, sampling_params, input_tokens): return min( sampling_params.max_tokens or (self.max_tokens - input_tokens), @@ -180,53 +148,6 @@ class _HfAdapter( return options - async def _get_params_for_completion(self, request: CompletionRequest) -> dict: - prompt, input_tokens = await completion_request_to_prompt_model_input_info(request) - - return dict( - prompt=prompt, - stream=request.stream, - details=True, - max_new_tokens=self._get_max_new_tokens(request.sampling_params, input_tokens), - stop_sequences=["<|eom_id|>", "<|eot_id|>"], - **self._build_options(request.sampling_params, request.response_format), - ) - - async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: - params = await self._get_params_for_completion(request) - - async def _generate_and_convert_to_openai_compat(): - s = await self.hf_client.text_generation(**params) - async for chunk in s: - token_result = chunk.token - finish_reason = None - if chunk.details: - finish_reason = chunk.details.finish_reason - - choice = OpenAICompatCompletionChoice(text=token_result.text, finish_reason=finish_reason) - yield OpenAICompatCompletionResponse( - choices=[choice], - ) - - stream = _generate_and_convert_to_openai_compat() - async for chunk in process_completion_stream_response(stream): - yield chunk - - async def _nonstream_completion(self, request: CompletionRequest) -> AsyncGenerator: - params = await self._get_params_for_completion(request) - r = await self.hf_client.text_generation(**params) - - choice = OpenAICompatCompletionChoice( - finish_reason=r.details.finish_reason, - text="".join(t.text for t in r.details.tokens), - ) - - response = OpenAICompatCompletionResponse( - choices=[choice], - ) - - return process_completion_response(response) - async def chat_completion( self, model_id: str, diff --git a/llama_stack/providers/remote/inference/together/together.py b/llama_stack/providers/remote/inference/together/together.py index 083c528bb..0c8363f6a 100644 --- a/llama_stack/providers/remote/inference/together/together.py +++ b/llama_stack/providers/remote/inference/together/together.py @@ -10,13 +10,9 @@ from openai import AsyncOpenAI 
from together import AsyncTogether from together.constants import BASE_URL -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, - CompletionRequest, Inference, LogProbConfig, Message, @@ -39,13 +35,10 @@ from llama_stack.providers.utils.inference.openai_compat import ( get_sampling_options, process_chat_completion_response, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_prompt, - completion_request_to_prompt, request_has_media, ) @@ -81,31 +74,6 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need async def shutdown(self) -> None: pass - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self.model_store.get_model(model_id) - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion(request) - else: - return await self._nonstream_completion(request) - def _get_client(self) -> AsyncTogether: together_api_key = None config_api_key = self.config.api_key.get_secret_value() if self.config.api_key else None @@ -127,19 +95,6 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need api_key=together_client.api_key, ) - async def _nonstream_completion(self, request: CompletionRequest) -> ChatCompletionResponse: - params = await self._get_params(request) - client = self._get_client() - r = await client.completions.create(**params) - return process_completion_response(r) - - async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: - params = await self._get_params(request) - client = self._get_client() - stream = await client.completions.create(**params) - async for chunk in process_completion_stream_response(stream): - yield chunk - def _build_options( self, sampling_params: SamplingParams | None, @@ -219,18 +174,14 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need async for chunk in process_chat_completion_stream_response(stream, request): yield chunk - async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict: + async def _get_params(self, request: ChatCompletionRequest) -> dict: input_dict = {} media_present = request_has_media(request) llama_model = self.get_llama_model(request.model) - if isinstance(request, ChatCompletionRequest): - if media_present or not llama_model: - input_dict["messages"] = [await convert_message_to_openai_dict(m) for m in request.messages] - else: - input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model) + if media_present or not llama_model: + input_dict["messages"] = [await convert_message_to_openai_dict(m) for m in request.messages] else: - assert not media_present, "Together does not support media for Completion requests" - input_dict["prompt"] = await 
completion_request_to_prompt(request) + input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model) params = { "model": request.model, diff --git a/llama_stack/providers/remote/inference/vllm/vllm.py b/llama_stack/providers/remote/inference/vllm/vllm.py index bef5cbf2c..44b3dc3db 100644 --- a/llama_stack/providers/remote/inference/vllm/vllm.py +++ b/llama_stack/providers/remote/inference/vllm/vllm.py @@ -15,7 +15,6 @@ from openai.types.chat.chat_completion_chunk import ( ) from llama_stack.apis.common.content_types import ( - InterleavedContent, TextDelta, ToolCallDelta, ToolCallParseStatus, @@ -27,9 +26,6 @@ from llama_stack.apis.inference import ( ChatCompletionResponseEventType, ChatCompletionResponseStreamChunk, CompletionMessage, - CompletionRequest, - CompletionResponse, - CompletionResponseStreamChunk, GrammarResponseFormat, Inference, JsonSchemaResponseFormat, @@ -64,14 +60,8 @@ from llama_stack.providers.utils.inference.openai_compat import ( convert_tool_call, get_sampling_options, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin -from llama_stack.providers.utils.inference.prompt_adapter import ( - completion_request_to_prompt, - request_has_media, -) from .config import VLLMInferenceAdapterConfig @@ -363,33 +353,6 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro def get_extra_client_params(self): return {"http_client": httpx.AsyncClient(verify=self.config.tls_verify)} - async def completion( # type: ignore[override] # Return type more specific than base class which is allows for both streaming and non-streaming responses. - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> CompletionResponse | AsyncGenerator[CompletionResponseStreamChunk, None]: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self._get_model(model_id) - if model.provider_resource_id is None: - raise ValueError(f"Model {model_id} has no provider_resource_id set") - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion(request) - else: - return await self._nonstream_completion(request) - async def chat_completion( self, model_id: str, @@ -474,24 +437,6 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro async for chunk in res: yield chunk - async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse: - if self.client is None: - raise RuntimeError("Client is not initialized") - params = await self._get_params(request) - r = await self.client.completions.create(**params) - return process_completion_response(r) - - async def _stream_completion( - self, request: CompletionRequest - ) -> AsyncGenerator[CompletionResponseStreamChunk, None]: - if self.client is None: - raise RuntimeError("Client is not initialized") - params = await self._get_params(request) - - stream = await self.client.completions.create(**params) - async for chunk in process_completion_stream_response(stream): - yield chunk - async def register_model(self, model: Model) -> Model: try: model = await 
self.register_helper.register_model(model) @@ -511,7 +456,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro ) return model - async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict: + async def _get_params(self, request: ChatCompletionRequest) -> dict: options = get_sampling_options(request.sampling_params) if "max_tokens" not in options: options["max_tokens"] = self.config.max_tokens @@ -521,11 +466,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro if isinstance(request, ChatCompletionRequest) and request.tools: input_dict = {"tools": _convert_to_vllm_tools_in_request(request.tools)} - if isinstance(request, ChatCompletionRequest): - input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages] - else: - assert not request_has_media(request), "vLLM does not support media for Completion requests" - input_dict["prompt"] = await completion_request_to_prompt(request) + input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages] if fmt := request.response_format: if isinstance(fmt, JsonSchemaResponseFormat): diff --git a/llama_stack/providers/remote/inference/watsonx/watsonx.py b/llama_stack/providers/remote/inference/watsonx/watsonx.py index 00b9acc06..cb9d61102 100644 --- a/llama_stack/providers/remote/inference/watsonx/watsonx.py +++ b/llama_stack/providers/remote/inference/watsonx/watsonx.py @@ -11,7 +11,6 @@ from ibm_watsonx_ai.foundation_models import Model from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams from openai import AsyncOpenAI -from llama_stack.apis.common.content_types import InterleavedContent from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, @@ -43,8 +42,6 @@ from llama_stack.providers.utils.inference.openai_compat import ( prepare_openai_completion_params, process_chat_completion_response, process_chat_completion_stream_response, - process_completion_response, - process_completion_stream_response, ) from llama_stack.providers.utils.inference.prompt_adapter import ( chat_completion_request_to_prompt, @@ -87,31 +84,6 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper): async def shutdown(self) -> None: pass - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - if sampling_params is None: - sampling_params = SamplingParams() - model = await self.model_store.get_model(model_id) - request = CompletionRequest( - model=model.provider_resource_id, - content=content, - sampling_params=sampling_params, - response_format=response_format, - stream=stream, - logprobs=logprobs, - ) - if stream: - return self._stream_completion(request) - else: - return await self._nonstream_completion(request) - def _get_client(self, model_id) -> Model: config_api_key = self._config.api_key.get_secret_value() if self._config.api_key else None config_url = self._config.url @@ -128,40 +100,6 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper): ) return self._openai_client - async def _nonstream_completion(self, request: CompletionRequest) -> ChatCompletionResponse: - params = await self._get_params(request) - r = self._get_client(request.model).generate(**params) - choices = [] - if "results" in r: - for 
result in r["results"]: - choice = OpenAICompatCompletionChoice( - finish_reason=result["stop_reason"] if result["stop_reason"] else None, - text=result["generated_text"], - ) - choices.append(choice) - response = OpenAICompatCompletionResponse( - choices=choices, - ) - return process_completion_response(response) - - async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator: - params = await self._get_params(request) - - async def _generate_and_convert_to_openai_compat(): - s = self._get_client(request.model).generate_text_stream(**params) - for chunk in s: - choice = OpenAICompatCompletionChoice( - finish_reason=None, - text=chunk, - ) - yield OpenAICompatCompletionResponse( - choices=[choice], - ) - - stream = _generate_and_convert_to_openai_compat() - async for chunk in process_completion_stream_response(stream): - yield chunk - async def chat_completion( self, model_id: str, diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py index 10df664eb..c8d3bddc7 100644 --- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py +++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py @@ -4,14 +4,11 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from collections.abc import AsyncGenerator, AsyncIterator +from collections.abc import AsyncIterator from typing import Any import litellm -from llama_stack.apis.common.content_types import ( - InterleavedContent, -) from llama_stack.apis.inference import ( ChatCompletionRequest, ChatCompletionResponse, @@ -62,7 +59,7 @@ class LiteLLMOpenAIMixin( self, litellm_provider_name: str, api_key_from_config: str | None, - provider_data_api_key_field: str, + provider_data_api_key_field: str | None = None, model_entries: list[ProviderModelEntry] | None = None, openai_compat_api_base: str | None = None, download_images: bool = False, @@ -73,7 +70,7 @@ class LiteLLMOpenAIMixin( :param model_entries: The model entries to register. :param api_key_from_config: The API key to use from the config. - :param provider_data_api_key_field: The field in the provider data that contains the API key. + :param provider_data_api_key_field: The field in the provider data that contains the API key (optional). :param litellm_provider_name: The name of the provider, used for model lookups. :param openai_compat_api_base: The base URL for OpenAI compatibility, or None if not using OpenAI compatibility. :param download_images: Whether to download images and convert to base64 for message conversion. 
@@ -108,17 +105,6 @@ class LiteLLMOpenAIMixin( else model_id ) - async def completion( - self, - model_id: str, - content: InterleavedContent, - sampling_params: SamplingParams | None = None, - response_format: ResponseFormat | None = None, - stream: bool | None = False, - logprobs: LogProbConfig | None = None, - ) -> AsyncGenerator: - raise NotImplementedError("LiteLLM does not support completion requests") - async def chat_completion( self, model_id: str, diff --git a/llama_stack/providers/utils/inference/model_registry.py b/llama_stack/providers/utils/inference/model_registry.py index 746ebd8f6..4913c2e1f 100644 --- a/llama_stack/providers/utils/inference/model_registry.py +++ b/llama_stack/providers/utils/inference/model_registry.py @@ -63,7 +63,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate): model_entries: list[ProviderModelEntry] | None = None, allowed_models: list[str] | None = None, ): - self.allowed_models = allowed_models + self.allowed_models = allowed_models if allowed_models else [] self.alias_to_provider_id_map = {} self.provider_id_to_llama_model_map = {} diff --git a/llama_stack/providers/utils/inference/openai_compat.py b/llama_stack/providers/utils/inference/openai_compat.py index cdd471d5e..da97d7c79 100644 --- a/llama_stack/providers/utils/inference/openai_compat.py +++ b/llama_stack/providers/utils/inference/openai_compat.py @@ -103,8 +103,6 @@ from llama_stack.apis.inference import ( JsonSchemaResponseFormat, Message, OpenAIChatCompletion, - OpenAICompletion, - OpenAICompletionChoice, OpenAIEmbeddingData, OpenAIMessageParam, OpenAIResponseFormatParam, @@ -1281,76 +1279,6 @@ async def prepare_openai_completion_params(**params): return completion_params -class OpenAICompletionToLlamaStackMixin: - async def openai_completion( - self, - model: str, - prompt: str | list[str] | list[int] | list[list[int]], - best_of: int | None = None, - echo: bool | None = None, - frequency_penalty: float | None = None, - logit_bias: dict[str, float] | None = None, - logprobs: bool | None = None, - max_tokens: int | None = None, - n: int | None = None, - presence_penalty: float | None = None, - seed: int | None = None, - stop: str | list[str] | None = None, - stream: bool | None = None, - stream_options: dict[str, Any] | None = None, - temperature: float | None = None, - top_p: float | None = None, - user: str | None = None, - guided_choice: list[str] | None = None, - prompt_logprobs: int | None = None, - suffix: str | None = None, - ) -> OpenAICompletion: - if stream: - raise ValueError(f"{self.__class__.__name__} doesn't support streaming openai completions") - - # This is a pretty hacky way to do emulate completions - - # basically just de-batches them... 
- prompts = [prompt] if not isinstance(prompt, list) else prompt - - sampling_params = _convert_openai_sampling_params( - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - ) - - choices = [] - # "n" is the number of completions to generate per prompt - n = n or 1 - for _i in range(0, n): - # and we may have multiple prompts, if batching was used - - for prompt in prompts: - result = self.completion( - model_id=model, - content=prompt, - sampling_params=sampling_params, - ) - - index = len(choices) - text = result.content - finish_reason = _convert_stop_reason_to_openai_finish_reason(result.stop_reason) - - choice = OpenAICompletionChoice( - index=index, - text=text, - finish_reason=finish_reason, - ) - choices.append(choice) - - return OpenAICompletion( - id=f"cmpl-{uuid.uuid4()}", - choices=choices, - created=int(time.time()), - model=model, - object="text_completion", - ) - - class OpenAIChatCompletionToLlamaStackMixin: async def openai_chat_completion( self, diff --git a/llama_stack/providers/utils/inference/openai_mixin.py b/llama_stack/providers/utils/inference/openai_mixin.py index 7da97e6b1..becec5fb3 100644 --- a/llama_stack/providers/utils/inference/openai_mixin.py +++ b/llama_stack/providers/utils/inference/openai_mixin.py @@ -24,6 +24,7 @@ from llama_stack.apis.inference import ( OpenAIResponseFormatParam, ) from llama_stack.apis.models import ModelType +from llama_stack.core.request_headers import NeedsRequestProviderData from llama_stack.log import get_logger from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params @@ -32,7 +33,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import localize_image_ logger = get_logger(name=__name__, category="providers::utils") -class OpenAIMixin(ModelRegistryHelper, ABC): +class OpenAIMixin(ModelRegistryHelper, NeedsRequestProviderData, ABC): """ Mixin class that provides OpenAI-specific functionality for inference providers. This class handles direct OpenAI API calls using the AsyncOpenAI client. @@ -69,6 +70,9 @@ class OpenAIMixin(ModelRegistryHelper, ABC): # List of allowed models for this provider, if empty all models allowed allowed_models: list[str] = [] + # Optional field name in provider data to look for API key, which takes precedence + provider_data_api_key_field: str | None = None + @abstractmethod def get_api_key(self) -> str: """ @@ -111,9 +115,28 @@ class OpenAIMixin(ModelRegistryHelper, ABC): Uses the abstract methods get_api_key() and get_base_url() which must be implemented by child classes. + + Users can also provide the API key via the provider data header, which + is used instead of any config API key. """ + + api_key = self.get_api_key() + + if self.provider_data_api_key_field: + provider_data = self.get_request_provider_data() + if provider_data and getattr(provider_data, self.provider_data_api_key_field, None): + api_key = getattr(provider_data, self.provider_data_api_key_field) + + if not api_key: # TODO: let get_api_key return None + raise ValueError( + "API key is not set. Please provide a valid API key in the " + "provider data header, e.g. x-llamastack-provider-data: " + f'{{"{self.provider_data_api_key_field}": ""}}, ' + "or in the provider config." 
+ ) + return AsyncOpenAI( - api_key=self.get_api_key(), + api_key=api_key, base_url=self.get_base_url(), **self.get_extra_client_params(), ) diff --git a/llama_stack/providers/utils/inference/prompt_adapter.py b/llama_stack/providers/utils/inference/prompt_adapter.py index ca6fdaf7e..728bbf8c9 100644 --- a/llama_stack/providers/utils/inference/prompt_adapter.py +++ b/llama_stack/providers/utils/inference/prompt_adapter.py @@ -229,28 +229,6 @@ async def convert_image_content_to_url( return base64.b64encode(content).decode("utf-8") -async def completion_request_to_prompt(request: CompletionRequest) -> str: - content = augment_content_with_response_format_prompt(request.response_format, request.content) - request.content = content - request = await convert_request_to_raw(request) - - formatter = ChatFormat(tokenizer=Tokenizer.get_instance()) - model_input = formatter.encode_content(request.content) - return formatter.tokenizer.decode(model_input.tokens) - - -async def completion_request_to_prompt_model_input_info( - request: CompletionRequest, -) -> tuple[str, int]: - content = augment_content_with_response_format_prompt(request.response_format, request.content) - request.content = content - request = await convert_request_to_raw(request) - - formatter = ChatFormat(tokenizer=Tokenizer.get_instance()) - model_input = formatter.encode_content(request.content) - return (formatter.tokenizer.decode(model_input.tokens), len(model_input.tokens)) - - def augment_content_with_response_format_prompt(response_format, content): if fmt_prompt := response_format_prompt(response_format): if isinstance(content, list): diff --git a/tests/integration/agents/test_openai_responses.py b/tests/integration/agents/test_openai_responses.py index c783cf99b..6648257e6 100644 --- a/tests/integration/agents/test_openai_responses.py +++ b/tests/integration/agents/test_openai_responses.py @@ -264,3 +264,36 @@ def test_function_call_output_response(openai_client, client_with_models, text_m assert ( "sunny" in response2.output[0].content[0].text.lower() or "warm" in response2.output[0].content[0].text.lower() ) + + +def test_function_call_output_response_with_none_arguments(openai_client, client_with_models, text_model_id): + """Test handling of function call outputs in responses when function does not accept arguments.""" + if isinstance(client_with_models, LlamaStackAsLibraryClient): + pytest.skip("OpenAI responses are not supported when testing with library client yet.") + + client = openai_client + + # First create a response that triggers a function call + response = client.responses.create( + model=text_model_id, + input=[ + { + "role": "user", + "content": "what's the current time? 
You MUST call the `get_current_time` function to find out.", + } + ], + tools=[ + { + "type": "function", + "name": "get_current_time", + "description": "Get the current time", + "parameters": {}, + } + ], + stream=False, + ) + + # Verify we got a function call + assert response.output[0].type == "function_call" + assert response.output[0].arguments == "{}" + _ = response.output[0].call_id diff --git a/tests/integration/recordings/responses/05e3ebc68306.json b/tests/integration/recordings/responses/05e3ebc68306.json index 53b7c8a89..b7d0a6e8e 100644 --- a/tests/integration/recordings/responses/05e3ebc68306.json +++ b/tests/integration/recordings/responses/05e3ebc68306.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-618", + "id": "chatcmpl-447", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245078, + "created": 1759282456, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/0b27fd737699.json b/tests/integration/recordings/responses/0b27fd737699.json index e25cde820..76979dd28 100644 --- a/tests/integration/recordings/responses/0b27fd737699.json +++ b/tests/integration/recordings/responses/0b27fd737699.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:37:47.461886Z", + "created_at": "2025-09-30T17:37:24.035083658Z", "done": true, "done_reason": "stop", - "total_duration": 338927833, - "load_duration": 100895125, + "total_duration": 2990785181, + "load_duration": 52933018, "prompt_eval_count": 223, - "prompt_eval_duration": 221583042, + "prompt_eval_duration": 2884018743, "eval_count": 2, - "eval_duration": 12341416, + "eval_duration": 53216446, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/0b3f2e4754ff.json b/tests/integration/recordings/responses/0b3f2e4754ff.json index 8496deeb0..fdfc30e1f 100644 --- a/tests/integration/recordings/responses/0b3f2e4754ff.json +++ b/tests/integration/recordings/responses/0b3f2e4754ff.json @@ -24,7 +24,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -39,7 +39,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254065, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -50,7 +50,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -65,7 +65,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254066, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -76,7 +76,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -91,7 +91,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254066, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -102,7 +102,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -117,7 +117,7 @@ "logprobs": null } ], - "created": 
1756921333, + "created": 1759254066, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -128,7 +128,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -143,7 +143,7 @@ "logprobs": null } ], - "created": 1756921334, + "created": 1759254066, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -154,7 +154,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -169,7 +169,7 @@ "logprobs": null } ], - "created": 1756921334, + "created": 1759254066, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -180,7 +180,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -195,7 +195,7 @@ "logprobs": null } ], - "created": 1756921334, + "created": 1759254067, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -206,7 +206,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-414", + "id": "chatcmpl-106", "choices": [ { "delta": { @@ -221,7 +221,7 @@ "logprobs": null } ], - "created": 1756921334, + "created": 1759254067, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/0ff78129bb3a.json b/tests/integration/recordings/responses/0ff78129bb3a.json deleted file mode 100644 index 3a52c789b..000000000 --- a/tests/integration/recordings/responses/0ff78129bb3a.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. 
besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\nReturns the boiling point of a liquid in Celsius or Fahrenheit.\n\n:param liquid_name: The name of the liquid\n:param celsius: Whether to return the boiling point in Celsius\n:return: The boiling point of the liquid in Celcius or Fahrenheit\n\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.143606Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "How", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.186151Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " can", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.229036Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.271516Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " assist", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.316272Z", - "done": false, - 
"done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.361005Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " further", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.404689Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "?", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.447699Z", - "done": true, - "done_reason": "stop", - "total_duration": 456939083, - "load_duration": 79653292, - "prompt_eval_count": 471, - "prompt_eval_duration": 71724667, - "eval_count": 8, - "eval_duration": 304859000, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/173ecb3aab28.json b/tests/integration/recordings/responses/173ecb3aab28.json index 0c29b278b..83f58a36d 100644 --- a/tests/integration/recordings/responses/173ecb3aab28.json +++ b/tests/integration/recordings/responses/173ecb3aab28.json @@ -40,7 +40,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -55,7 +55,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253815, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -66,7 +66,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -81,7 +81,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253815, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -92,7 +92,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -107,7 +107,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253815, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -118,7 +118,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -133,7 +133,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253816, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -144,7 +144,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -159,7 +159,7 @@ "logprobs": null } 
], - "created": 1756920971, + "created": 1759253816, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -170,7 +170,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -185,7 +185,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253816, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -196,7 +196,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -211,7 +211,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253816, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -222,7 +222,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-921", + "id": "chatcmpl-629", "choices": [ { "delta": { @@ -237,7 +237,7 @@ "logprobs": null } ], - "created": 1756920971, + "created": 1759253816, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/1a4da7c94fde.json b/tests/integration/recordings/responses/1a4da7c94fde.json index 4b3fb8fb6..ca24f20d2 100644 --- a/tests/integration/recordings/responses/1a4da7c94fde.json +++ b/tests/integration/recordings/responses/1a4da7c94fde.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-438", + "id": "chatcmpl-478", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245073, + "created": 1759282396, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/1b8394f90636.json b/tests/integration/recordings/responses/1b8394f90636.json deleted file mode 100644 index 6857c6840..000000000 --- a/tests/integration/recordings/responses/1b8394f90636.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "prompt": "<|begin_of_text|>Complete the sentence using one word: Roses are red, violets are ", - "raw": true, - "options": { - "temperature": 0.0, - "max_tokens": 50, - "num_predict": 50 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:13.821929Z", - "done": true, - "done_reason": "stop", - "total_duration": 1907912167, - "load_duration": 90979292, - "prompt_eval_count": 18, - "prompt_eval_duration": 77350291, - "eval_count": 43, - "eval_duration": 1738568334, - "response": " _______.\n\nThe best answer is blue. The traditional nursery rhyme goes like this:\n\nRoses are red,\nViolets are blue,\nSugar is sweet,\nAnd so are you! 
(Or something similar.)", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/1b92be674e2a.json b/tests/integration/recordings/responses/1b92be674e2a.json deleted file mode 100644 index e5f05bf54..000000000 --- a/tests/integration/recordings/responses/1b92be674e2a.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho is the CEO of Meta?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:39:38.236797Z", - "done": true, - "done_reason": "stop", - "total_duration": 1296281500, - "load_duration": 283393917, - "prompt_eval_count": 23, - "prompt_eval_duration": 75453042, - "eval_count": 24, - "eval_duration": 936860125, - "response": "Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/211b1562d4e6.json b/tests/integration/recordings/responses/211b1562d4e6.json deleted file mode 100644 index 2d0044e27..000000000 --- a/tests/integration/recordings/responses/211b1562d4e6.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhich planet do humans live on?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:17.894986Z", - "done": true, - "done_reason": "stop", - "total_duration": 363397458, - "load_duration": 86692791, - "prompt_eval_count": 23, - "prompt_eval_duration": 68658541, - "eval_count": 6, - "eval_duration": 207389084, - "response": "Humans live on Earth.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/239e4503608a.json b/tests/integration/recordings/responses/239e4503608a.json new file mode 100644 index 000000000..448197b2c --- /dev/null +++ b/tests/integration/recordings/responses/239e4503608a.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "What inspires neural networks?" 
+ ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.0050316164, + 0.07984447, + -0.15915774, + -0.015208397, + 0.06857012, + -0.025208611, + 0.013689548, + 0.01110039, + -0.021925347, + -0.014392589, + -0.0557497, + 0.048096333, + 0.124248095, + 0.05381016, + -0.032023083, + 0.03293363, + -0.07727248, + -0.01613264, + -0.0012452743, + -0.015702942, + -0.067251004, + -0.028757395, + 0.034863908, + -0.0017118178, + 0.0616299, + 0.021848574, + -0.022553956, + -0.033664376, + 0.01553894, + 0.009967761, + 0.08114387, + -0.066336334, + -0.025725907, + 0.0058821645, + -0.072110265, + -0.015364161, + 0.031697143, + -0.015320406, + 0.011826234, + 0.05202543, + -0.008305483, + -0.013734584, + -0.06918373, + -0.016431326, + 0.0070836195, + 0.026307657, + 0.021504063, + -0.053779546, + 0.072037436, + -0.036065537, + 0.016765, + -0.015237846, + -0.023797043, + -0.017345365, + 0.081010945, + 0.017555244, + 0.00849005, + -0.011041562, + 0.021113921, + 0.0012852269, + 0.05733302, + 0.04459211, + -0.006820112, + 0.049741834, + 0.032682, + -0.018714704, + -0.047921024, + 0.05474767, + 0.010007742, + 0.027578747, + 0.01696662, + -0.0005828434, + 0.02848909, + 0.049656194, + 0.029906206, + 0.04397822, + -0.04246628, + 0.01594018, + -0.029281856, + 0.052589595, + 0.086577676, + 0.0042159576, + -0.029517883, + -0.009740598, + 0.043349918, + 0.044087544, + -0.02930377, + 0.0024098633, + -0.030418152, + 0.08221704, + 0.046374217, + 0.008004957, + 0.017713528, + -0.034519937, + -0.034394786, + -0.019209871, + 0.01361772, + -0.0012474392, + -0.06304891, + -0.03015956, + -0.026744615, + -0.04382269, + 0.009914152, + -0.050125472, + 0.030627307, + -0.010395332, + 0.0067255315, + -0.025443034, + 0.015175414, + 0.011367137, + -0.004649633, + 0.0003723871, + -0.010448302, + -0.0021068275, + -0.046118032, + -0.022402227, + 0.01804005, + -0.025681397, + 0.036584888, + 0.080027714, + 0.025778025, + -0.017021077, + 0.00734547, + -0.007449189, + 0.013060171, + 0.07254409, + -0.015623211, + -0.019112717, + -0.010143475, + -0.048559416, + 0.038491815, + -0.0065740654, + -0.0521703, + -0.059264045, + 0.032110944, + 0.061506197, + -0.048721578, + -0.03464822, + 0.013747572, + 0.007892225, + 0.03265148, + -0.037367918, + 0.024855481, + -0.01627199, + -0.01771346, + -0.035029493, + 0.0013889165, + 0.0036677802, + -0.029530859, + 0.03162031, + -0.024760932, + 0.028933072, + 0.017674228, + -0.03722869, + 0.063645, + -0.04195384, + -0.034291398, + -0.042508453, + -0.0026806353, + 0.008954077, + 0.06860229, + -0.0043270513, + 0.031392172, + -0.0052816705, + -0.042464685, + -0.03767891, + 0.037023526, + 0.009309706, + 0.03279453, + 0.06322216, + -0.04550696, + 0.022164896, + -0.03588774, + 0.028416842, + 0.050470043, + -0.0034147543, + 0.0069440254, + -0.016464153, + 0.03128234, + -0.046282057, + 0.017499384, + -0.044354558, + 0.041510575, + 0.044442233, + -0.005217252, + 0.011210587, + -0.01738494, + -0.0050604055, + -0.04739853, + -0.006758368, + 0.010371208, + 0.0031476691, + -0.047869083, + -0.031100815, + -0.049210694, + -0.026688233, + 0.0077580754, + -0.022510948, + 0.054258704, + 0.011458622, + -0.02378493, + -0.012583161, + -0.056452923, + -0.007816392, + -0.038032427, + 0.04502559, + -0.01308419, + 0.043747045, + 0.016204404, + -0.0041383137, + 0.049442504, + 0.0076792636, + -0.0021476683, + 
-0.021795, + -0.031687617, + 0.025953416, + 0.0012399888, + -0.01656653, + -0.005198368, + 0.023106242, + 0.026499178, + -0.007669003, + 0.04550536, + -0.019885251, + -0.006509397, + -0.028927304, + -0.03770212, + -0.015793309, + 0.009043467, + 0.020382207, + -0.02132457, + -0.04350365, + 0.030105298, + 0.013326256, + 0.05148862, + 0.013384519, + 0.08420081, + 0.012137208, + 0.01429465, + -0.021215776, + 0.019751377, + 0.010666951, + -0.0028496862, + -0.0044943816, + -0.046843883, + -0.0145780165, + 0.0044858507, + -0.052179694, + -0.010133602, + 0.038626175, + 0.018442878, + -0.0016659115, + -0.003639202, + 0.018665677, + 0.053869862, + 0.006519413, + -0.0063330783, + 0.03512428, + -0.0033435219, + -0.050845515, + 0.059054703, + -0.018078795, + 0.012237686, + -0.032968126, + 0.015100413, + -0.054588336, + 0.015835619, + -0.03670951, + -0.012846813, + -0.01836416, + -0.024260957, + 0.059409123, + 0.015367348, + -0.028107207, + 0.009289864, + 0.037938606, + 0.024906129, + 0.02536807, + 0.005617444, + -0.02020537, + -0.067401595, + -0.009159591, + -0.049427476, + -0.04140775, + -0.028121712, + -0.0012032806, + 0.065760456, + -0.009735368, + 0.024084985, + 0.022508778, + 0.017129708, + -0.054647677, + 0.015578886, + 0.017550059, + 0.004188966, + -0.021639245, + 0.08918487, + -0.010681521, + -0.0013267483, + -0.04089318, + 0.004022531, + 0.009869387, + 0.03852075, + 0.012265251, + -0.021414107, + -0.035589736, + -0.041858815, + 0.0010829576, + -0.0052885553, + 0.027289463, + -0.090056516, + 0.013117442, + 0.015796974, + -0.006428205, + -0.010485043, + 0.03804702, + 0.0019676236, + 0.030326132, + 0.06926383, + -0.04581391, + -0.026230657, + -0.05017411, + -0.069891036, + -0.020800032, + -0.0021375767, + 0.03964166, + 0.022971395, + 0.009086531, + -0.0025304465, + -0.015464918, + 0.042726092, + -0.006683121, + -0.008244169, + -0.016234832, + -0.0031603999, + -0.044795815, + -0.035910357, + 0.053608935, + -0.006930592, + 0.04424536, + -0.012017321, + 0.0155857755, + -0.008697974, + -0.067098126, + -0.032931764, + 0.026898768, + 0.0010457109, + -0.041276965, + 0.017719025, + -0.009889669, + -0.048280854, + 0.009008355, + -0.008872175, + -0.01640687, + -0.0051646377, + -0.022281006, + 0.041271873, + 0.06915707, + 0.029213337, + 0.0133835655, + 0.044670742, + 0.0017441317, + 0.013911358, + -0.03592245, + -0.060621563, + 0.018041532, + 0.017789826, + -0.00043342085, + 0.019603321, + 0.012585408, + 0.034794804, + -0.0023819709, + -0.013787601, + 0.05080919, + -0.044285674, + 0.055536143, + -0.08918706, + -0.03900586, + -0.037006263, + 0.003928892, + -0.015029967, + -0.02021197, + 0.033677697, + -0.013563023, + 0.037201263, + 0.019805612, + -0.02354718, + -0.037705727, + 0.025382977, + 0.0061666463, + -0.020041076, + 0.04034747, + -0.07936578, + -0.031228192, + 0.035324488, + -0.054238997, + 0.047006484, + 0.00159503, + 0.07012299, + 0.007637998, + -0.018800775, + -0.053914547, + -0.050283875, + -0.034318645, + 0.008452663, + 0.01237047, + 0.00035791937, + -0.046610557, + 0.042989474, + -0.019692015, + -0.00061614456, + 0.062187936, + 0.04266471, + -0.050016437, + 0.021421405, + -0.024854518, + 0.068603024, + 0.060942996, + -0.014557106, + 0.03239151, + 0.010247157, + 0.015091995, + 0.009245114, + 0.02277781, + 0.027239017, + 0.043091062, + -0.00082639145, + 0.00031364473, + -0.058441285, + -0.018276462, + 0.030178891, + -0.023433916, + -0.013687651, + -0.012881733, + -0.030734714, + 0.03498326, + -0.013399916, + 0.04820285, + 0.013932867, + 0.05571984, + 0.04240612, + -0.0060554333, + 0.0032024565, + 
-0.042510703, + 0.048483945, + 0.08732585, + 0.0027016816, + 0.0011064744, + -0.09377502, + 0.067491576, + 0.018435383, + 0.012728095, + 0.029038312, + 0.0040321746, + 0.07395845, + 0.0031073147, + 0.028865123, + 0.006154529, + 0.03711985, + 0.03329579, + -0.0040069376, + -0.011551551, + -0.053671077, + 0.010432108, + -0.038892966, + -0.0003408905, + 0.0007365908, + -0.047822062, + 0.053264767, + 0.02096518, + 0.004777782, + 0.0432757, + 0.021553257, + -0.0026501648, + -0.0072480487, + -0.002123129, + 0.061610248, + -0.01611616, + 0.035909727, + 0.058587678, + 0.0145304715, + -0.020112783, + -0.05207282, + -0.08221201, + 0.009016992, + -0.00064655097, + 0.01956686, + 0.018373564, + -0.013966411, + -0.022123411, + -0.0071573188, + 0.033414096, + -0.04946249, + -0.0034403466, + -0.01580445, + -0.026580384, + -0.07122861, + 0.04952695, + 0.036092717, + -0.002789775, + 0.026477033, + 0.03799533, + -0.0452679, + -0.003930312, + 0.018536521, + -0.01201987, + 0.025422221, + -0.066111766, + -0.029471582, + 0.009364392, + -0.04817774, + -0.0008147315, + -0.0148154665, + 0.00984774, + -0.00092833134, + -0.03763107, + -0.020189954, + -0.024074532, + -0.023612108, + 0.015350284, + 0.030945191, + -0.03588645, + -0.021719966, + -0.020571873, + -0.012741516, + 0.039295603, + -0.033746354, + 0.0028816632, + 0.048078135, + -0.0034790456, + 0.04186476, + -0.016505575, + -0.056669652, + -0.0026806216, + 0.04009492, + -0.016062018, + 0.016597595, + -0.015369735, + 0.01423482, + -0.01612097, + 0.05822151, + -0.0043877237, + 0.009242956, + -0.0037488444, + -0.0044891555, + -0.027579125, + -0.025424628, + 0.028450571, + -0.01797597, + -0.06810425, + 0.0168767, + 0.0026893963, + -0.008469021, + 0.012569571, + 0.004442434, + -0.041943144, + -0.019236285, + -0.028779197, + 0.0046836706, + -0.0365118, + 0.018350676, + 0.021902338, + 0.03604989, + -0.006049927, + -0.037667684, + 0.043027684, + -0.01943701, + 0.010076409, + 0.038713254, + 0.07812194, + 0.06597296, + -0.045489065, + 0.0070664356, + 0.0044989125, + -0.011527495, + -0.046050567, + 0.067999, + -0.008593809, + -0.086977795, + -0.052920334, + -0.016987754, + -0.0752132, + 0.029077167, + -0.024781171, + -0.00960023, + 0.0056692883, + -0.039548755, + -0.013300934, + 0.054275468, + -0.03491646, + -0.035587896, + -0.007802609, + -0.028378379, + -0.05615233, + -0.011850314, + -0.017397001, + -0.0525217, + -0.0003308184, + -0.040857855, + -0.021513592, + 0.025556894, + 0.01627368, + 0.055545956, + -0.004418218, + -0.051336065, + 0.0488211, + 0.012719186, + 0.007410796, + -0.0034307821, + 0.0516907, + -0.01817577, + -0.004452086, + -0.0056198505, + -0.015632447, + 0.075757094, + -0.018579062, + 0.035753764, + -0.015519769, + -0.054327093, + 0.01306886, + -0.019790396, + -0.036639318, + 0.07008371, + 0.0061804685, + 0.046798132, + -0.005218823, + -0.064510226, + -0.0127003165, + 0.0017728137, + 0.040912032, + -0.058067385, + 0.059538517, + -0.10029672, + 0.002820211, + -0.07771457, + 0.008914206, + 0.00806939, + 0.03881859, + 0.017941529, + 0.007458678, + 0.0011317434, + -0.050489407, + -0.039054077, + 0.028261676, + 0.04449006, + 0.010117796, + 0.057966575, + 0.08405063, + 0.037630063, + 0.0017458433, + 0.07786049, + 0.012527607, + 0.05369065, + -0.004282323, + -0.044055793, + 0.003343061, + 0.02884031, + -0.057139236, + -0.030217687, + -0.0159622, + -0.04396499, + -0.00034443758, + -0.019190768, + 0.0051302793, + 0.005976632, + -0.05645029, + -0.0011924162, + -0.020180402, + -0.037948944, + -0.008716054, + 0.035000052, + -0.041332114, + 0.0021782147, + 
-0.0439729, + -0.032859106, + 0.027919779, + 0.008747301, + 0.05736891, + 0.013317791, + 0.0012040264, + -0.0033161226, + 0.018489197, + -0.0026256584, + -0.05727805, + 0.023803348, + -0.012519388, + 0.02669887, + 0.0062565706, + -0.017575208, + -0.04754666, + -0.02628541, + -0.07511388, + 0.008495705, + -0.04325911, + -0.05147621, + 0.05350302, + -0.047565665, + 0.029716888, + -0.017600134, + 0.06251193, + -0.06014906, + 0.06652642, + -0.016948748, + 0.047118686, + -0.022581328, + 0.008118961, + 0.023824824, + -0.028134644, + -0.013040867, + -0.036118224, + -0.043649647, + 0.024044087, + 0.043980736, + 0.09335813, + 0.0065352735, + 0.048652958, + 0.02291362, + -0.031512454, + -0.026838718, + 0.072112754, + 0.029041806, + 0.009871398, + -0.076643795, + 0.017986268, + -0.036420677, + -0.030303614, + 0.02293626, + -0.028474882, + -0.02937154, + 0.01083049, + 0.0067934864, + -0.031213833, + -0.04556768, + -0.0046230564, + -0.0074542915, + -0.021028588, + -0.058362946, + 0.0034970073, + 0.04495744, + -0.008255564, + -0.011092999, + 0.026076281, + 0.016826289, + -0.026028905, + -0.0025076317, + 0.017507493, + 0.015523931, + 0.04691712, + 0.011547796, + -0.038370498, + 0.029770205, + -0.017786123, + -0.006200203, + 0.013117157, + 0.027439341, + 0.017241932, + -0.063327014, + 0.075111434, + 0.10742071, + -0.00892997, + 0.042728376, + -0.0031351764, + 0.06845063, + -0.009078234, + -0.030184548, + 0.04281056, + -0.037315223, + 0.012807935 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/27ef1a50dc19.json b/tests/integration/recordings/responses/27ef1a50dc19.json new file mode 100644 index 000000000..10c625e24 --- /dev/null +++ b/tests/integration/recordings/responses/27ef1a50dc19.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "Python programming language" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.012737296, + 0.052157503, + -0.09865639, + -0.05476475, + 0.05301662, + 0.0074160905, + -0.06798324, + -0.0033211287, + -0.016955739, + -0.066146754, + -0.00029801717, + 0.044583604, + 0.04537025, + -0.044383764, + 0.0023149354, + -0.09608677, + 0.025675122, + -0.0704009, + -0.03931903, + 0.06766093, + 0.017914528, + -0.040849652, + 0.026488103, + -0.015297751, + 0.11874497, + 0.020230753, + 0.0105890855, + -0.0036319923, + -0.0075948774, + 0.016645674, + -0.045041427, + 0.004138968, + 0.0004353597, + -0.02476739, + -0.044161372, + -0.06683856, + 0.06450044, + -0.018002711, + 0.038697395, + 0.015279114, + -0.043509968, + 0.009773898, + 0.060179695, + -0.007329619, + 0.07848926, + -0.06192075, + 0.004529198, + -0.014174553, + -0.03300747, + 0.021683672, + -0.020385684, + -0.035768215, + -0.043068312, + -0.013654137, + 0.07617396, + 0.038741313, + 0.006725823, + 0.011636873, + 0.015038775, + -0.06120382, + 0.07566976, + 0.082728565, + -0.08939894, + 0.04476117, + 0.05678162, + -0.011741467, + 0.0026016668, + 0.03271547, + -0.023847334, + 0.014053751, + 0.030476196, + -0.06255138, + 0.04260044, + 
-0.0026815364, + -0.0260585, + -0.007336162, + -0.020206766, + -0.04938916, + 0.017385937, + 0.06006105, + -0.013208199, + 0.016350197, + -0.0109011745, + 0.028250203, + 0.04128484, + -0.06976558, + -0.042334184, + -0.0020309563, + -0.051363576, + 0.020697631, + -0.06012748, + -0.0064777704, + -0.02580574, + 0.004771875, + -0.064917386, + 0.02215894, + -0.054416675, + 0.026068965, + 0.04200019, + -0.024564879, + 0.0077957124, + -0.015894597, + 0.060694925, + -0.048398413, + 0.03545728, + 0.043259352, + 0.04367656, + -0.035536934, + -0.058171894, + -0.0115244435, + -0.006172969, + 0.045124453, + -0.027776113, + -0.022800889, + -0.045794144, + 0.0015683161, + 0.02532558, + -0.0408559, + 0.06885377, + 0.053380273, + -0.002310288, + -0.048188288, + 0.040053353, + 0.048873883, + -0.018484699, + 0.024138113, + -0.06406123, + 0.028043946, + 0.013406045, + -0.03121256, + 0.04827139, + -0.022590872, + -0.044979047, + -0.009155806, + -0.0345572, + 0.040470112, + -0.053579397, + -0.014609841, + 0.09309223, + -0.022341968, + 0.022824768, + 0.027127359, + -0.023630599, + -0.014862734, + 0.019149441, + -0.022489576, + 0.037146494, + 0.026537362, + -0.013998867, + 0.023908654, + 0.019494286, + 0.035421006, + 0.010681667, + 0.04866381, + -0.00028648498, + 0.0076756324, + 0.01770439, + 0.004861778, + 0.0675088, + -0.02110296, + 0.07012984, + 0.011100984, + -0.015785491, + 0.029732592, + -0.042797945, + -0.028424682, + 0.024825025, + 0.012830561, + -0.031163441, + 0.0010846684, + -0.04394154, + -0.06074506, + -0.0068602944, + -0.02000956, + 0.017218532, + 0.016892785, + -0.016099539, + -0.011027052, + 0.04092132, + -0.013812635, + -0.0171445, + -0.05161461, + 0.043900732, + 0.054356292, + -0.06110619, + 0.010437808, + -0.010695358, + -0.038556177, + -0.022182107, + -0.013702171, + -0.02606656, + 0.0417685, + -0.03564253, + -0.065730296, + -0.048234634, + -0.031294968, + 0.018793715, + 0.0028812673, + 0.059523605, + -0.07834006, + -0.041890293, + -0.007903964, + -0.05529348, + -0.010216022, + -0.05732938, + -0.008337224, + -0.004084479, + 0.0032915517, + -0.04187034, + 0.01608275, + 0.06422492, + 0.018843329, + -0.023873901, + 0.061657883, + 0.0042031026, + -0.035615478, + -0.0233748, + -0.01701599, + 0.011956012, + 0.034292623, + 0.056101177, + 0.00090226205, + 0.0053342264, + 0.0020548122, + 0.01625327, + 0.028918983, + -0.066553414, + 0.017591959, + -0.055340543, + 0.014200978, + 0.0043894285, + -0.046320267, + 0.009632542, + 0.026329784, + 0.037263606, + 0.060245816, + 0.047682427, + 0.044949647, + -0.010772139, + -0.041810554, + -0.031361483, + 0.0073113176, + -0.030563952, + 0.04529861, + -0.009128403, + -0.0051679183, + -0.004846899, + -0.009234518, + -0.017252633, + 0.039498128, + -0.019625667, + -0.0402034, + -0.005365279, + 0.06279761, + 0.027031269, + 0.02773575, + 0.032350197, + 0.00057488075, + 0.06752743, + -0.017945373, + 0.03612706, + -0.038697086, + -0.029901898, + -0.0113743795, + -0.020817084, + -0.0028207486, + -0.0037516905, + 0.016709562, + 0.0070552756, + -0.025101524, + 0.013061921, + -0.0097264135, + 0.023312164, + -0.030784104, + -0.0029193545, + -0.02444496, + 0.027738145, + -0.047183525, + -0.0056739203, + 0.009817768, + 0.028266534, + -0.06388905, + -0.019374298, + 0.04362763, + -0.0057525537, + 0.010138786, + 0.025025772, + 0.0056975563, + -0.013095728, + -0.010737826, + 0.05379437, + 0.0035773406, + -0.033730775, + -0.022392886, + -0.024516208, + 0.03529997, + 0.04245314, + 0.029541131, + 0.044283565, + -0.010923522, + -0.015672298, + 0.031540904, + 0.049757652, + 0.0134175075, 
+ 0.026056338, + -0.045238763, + 0.036880285, + 0.019401666, + -0.01225724, + -0.011385536, + -0.039677687, + 0.012001496, + -0.018710397, + 0.051085025, + -0.07968707, + 0.044598807, + 0.020966908, + 0.024486324, + 0.030820722, + -0.035817347, + -0.005985216, + -0.077220775, + 0.060087338, + -0.018667521, + 0.00042907865, + 0.04296211, + 0.010683234, + 0.03383496, + -0.000113617025, + -0.034164984, + -0.012604936, + 0.013022496, + 0.024046391, + -0.021777937, + -0.043731887, + 0.0033063248, + 0.0032457314, + -0.013931376, + 0.0023861264, + 0.0075240964, + 0.007015829, + -0.05085907, + 0.042630788, + -0.02087415, + -0.007658267, + 0.013132027, + 0.041472685, + -0.040956587, + 0.05658287, + 0.04250153, + 0.0021518448, + 0.044045568, + -0.040921584, + 0.007132343, + -0.00048801105, + -0.036380254, + 0.047273647, + -0.004309134, + -0.013429063, + -0.00019902465, + -0.0004708195, + -0.029873386, + 0.027239243, + -0.03529831, + -0.023228176, + 0.024661895, + 0.05063533, + -0.028260268, + 0.01129846, + -0.0045312783, + -0.031872246, + -0.046879377, + -0.007871232, + 0.004367725, + -0.017214479, + -0.015753403, + -0.078615755, + -0.014234739, + -0.025533726, + 0.029994033, + 0.006888315, + -0.042100083, + -0.0016963482, + 0.021459604, + -0.01591483, + -0.07365999, + -0.010291573, + 0.0047568013, + 0.03292463, + 0.043200362, + 0.014325783, + -0.048490327, + -0.024439182, + 0.033686552, + 0.029715305, + -0.010423145, + 0.013148504, + 0.0008267967, + -0.027305948, + -0.0060520596, + -0.0779034, + -0.06871077, + 0.03765654, + -0.023108464, + -0.027462585, + 0.022435384, + -0.010619645, + -0.019606477, + 0.02848785, + -0.009619229, + -0.007973983, + -0.0029784956, + 0.009451803, + -0.019557634, + -0.021816052, + 0.028761018, + 0.027324788, + 0.031654317, + -0.058149435, + 0.017170029, + 0.034972027, + 0.027760118, + -0.010306612, + 0.012620151, + 0.008334629, + 0.012273061, + 0.029800836, + 0.058904618, + 0.018408349, + -0.054807078, + 0.0006477238, + 0.022915987, + 0.03338144, + 0.03668132, + -0.0071606343, + -0.0016230526, + 0.022836274, + 0.01099753, + -0.015486893, + 0.046064902, + 0.03652358, + -0.021730995, + -0.04240822, + 0.007839006, + 0.010131339, + 0.071891285, + 0.08595036, + -0.036551163, + -0.036580227, + 0.027753903, + 0.013721581, + 0.015000481, + 0.009816424, + 0.033280663, + 0.06401278, + 0.034881614, + -0.010603335, + 0.02859825, + -0.02816573, + 0.07249696, + 0.005746021, + -0.026890617, + -0.05659028, + -0.007152308, + -0.024288459, + -0.018561136, + -0.013725504, + -0.030577758, + 0.005742889, + 0.0024392854, + -0.0399384, + 0.020328993, + 0.039503425, + -0.042268254, + -0.022119028, + -0.034113314, + -0.030274384, + 0.011519863, + 0.050782666, + 0.004041363, + -0.023739118, + -0.0027546436, + -0.058498923, + -0.005471496, + -0.0053262375, + 0.037513364, + -0.004591814, + 0.021252032, + -0.001629569, + -0.04622212, + 0.047883164, + 0.03736839, + 0.08020275, + 0.00542343, + -0.03817893, + -0.009962559, + -0.040674374, + 0.09175239, + 0.1028728, + 0.028166553, + 0.04177519, + 0.019556358, + -0.044252433, + -0.015929267, + 0.042483907, + -0.031323276, + 0.068415634, + -0.008449004, + -0.035050288, + 0.037856326, + 0.055856578, + 0.00058986177, + 0.032994922, + 0.018346844, + 0.038019393, + -0.03150018, + 0.009805387, + -0.03539326, + -0.09154862, + 0.009951651, + 0.0144051695, + -0.041230854, + -0.010663703, + -0.023963679, + -0.029891582, + 0.03757397, + 0.031183342, + -0.01945111, + -0.016845128, + -0.023847176, + 0.047975387, + -0.023667773, + -0.04123289, + -0.020595824, + 
-0.048070088, + -0.062379338, + -0.049796887, + 0.038511876, + 0.010982749, + -0.004460679, + 0.07803074, + 0.02439175, + 0.02101776, + -0.0038604757, + 0.05022388, + 0.011080523, + -0.02685521, + -0.009115208, + -0.005774415, + -0.05743546, + 0.07516603, + -0.040346682, + 0.0063808565, + -0.02058147, + 0.010124437, + -0.029869549, + -0.005972344, + -0.025552256, + 0.0043650023, + -0.043274693, + -0.035563324, + 0.008438223, + 0.00926376, + 0.010181649, + 0.0063408106, + 0.030337317, + -0.018971639, + -0.03495948, + -0.018965906, + 0.03824476, + -0.037335593, + -0.035132956, + -0.0004800879, + 0.0031907824, + 0.005043757, + 0.010878841, + 0.02765467, + -0.03625543, + -0.056799237, + -0.010009897, + 0.07060158, + -0.031162763, + -0.018445587, + 0.036646154, + -0.025019318, + -0.0059613483, + 0.012737257, + 0.004886132, + -0.03758108, + -0.012071592, + -0.014093439, + 0.011282327, + -0.017012196, + 0.020709567, + -0.010598827, + 0.024100173, + -0.066286445, + -0.020624982, + -0.019746993, + -0.04389995, + -0.000542952, + -0.00042189853, + 0.047723014, + -0.015338273, + -0.0014234964, + 0.08354232, + -0.0323755, + 0.056150857, + -0.017370827, + -0.019247927, + 0.036820125, + 0.019029636, + -0.0148101, + 0.033162937, + 0.030420834, + -0.06173969, + 0.045244128, + 0.010388652, + 0.014610128, + -0.024237249, + -0.005471384, + -0.05329097, + 0.03361388, + -0.022210777, + 0.042801995, + 0.021740006, + -0.04432001, + 0.020300837, + 0.040372755, + 0.071037516, + 0.0064171883, + -0.003981306, + -0.048869807, + 0.0020238254, + -0.009861756, + 0.006638257, + -0.033705212, + 0.0005100761, + 0.03717974, + 0.065557785, + 0.047391072, + -0.03947765, + 0.0040267883, + -0.008363395, + 0.0065301796, + -0.011944791, + 0.033006497, + 0.07639093, + -0.0033113193, + -0.05430868, + 0.07391257, + 0.064527504, + -0.002406421, + 0.0062794937, + 0.011258814, + 0.014174505, + 0.051364396, + -0.049812824, + -0.063861094, + 0.008121674, + -0.014099882, + -0.03951206, + -0.03534859, + 0.031739417, + 0.068740524, + 0.057014074, + 0.0065806364, + 0.0014213074, + -0.054351427, + -0.0045105484, + -0.007082805, + 0.016566794, + -0.01276022, + -0.030325878, + 0.020703789, + 0.05879084, + 0.018262943, + -0.024337808, + -0.056616426, + -0.018280823, + 0.016159344, + -0.026617214, + -0.032240644, + -0.01484388, + 0.039500516, + -0.045082357, + 0.054483585, + -0.018476259, + -0.022805728, + -0.06581501, + -0.02136263, + -0.02278495, + 0.0022921907, + -0.055788554, + 0.043488245, + -0.017217342, + -0.019207379, + -0.03229883, + 0.014165345, + 0.07650592, + 0.0145935565, + 0.023521688, + 0.011726674, + 0.051898655, + -0.06092941, + 0.0049421154, + 0.017239925, + 0.029926429, + -0.011885315, + -0.053228807, + -0.022613214, + 0.021623421, + 0.048251476, + 0.06570422, + 0.035834767, + 0.032429963, + -0.05052382, + -0.046073183, + -0.04484784, + 0.01433757, + 0.072260626, + -0.010861808, + -0.023238782, + 0.015412952, + -0.0336904, + -0.0018390296, + -0.003844745, + -0.06879578, + 0.0040851673, + -0.0033650463, + 0.020701468, + 0.022823572, + -0.055186763, + 0.030715447, + -0.0077931485, + 0.057467323, + -0.031872775, + -0.04632591, + -0.058218405, + 0.0021320789, + 0.011682204, + 0.05363371, + -0.0022989055, + 0.05224489, + 0.008273623, + -0.024590664, + -0.015599656, + 0.0622297, + 0.05610885, + -0.03643005, + -0.029709268, + -0.008453385, + -0.047318127, + 0.093379706, + -0.019986182, + -0.013489889, + -0.032653943, + 0.0735651, + 0.052270554, + 0.0009286598, + 0.01696985, + -0.012898181, + -0.012480467, + -0.028892197, + -0.03233334, 
+ -0.00919493, + -0.0477996, + -0.017610596 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 3, + "total_tokens": 3 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/2afe3b38ca01.json b/tests/integration/recordings/responses/2afe3b38ca01.json index 270d2744c..a1cb871ff 100644 --- a/tests/integration/recordings/responses/2afe3b38ca01.json +++ b/tests/integration/recordings/responses/2afe3b38ca01.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.436472Z", + "created_at": "2025-10-01T01:34:06.144961341Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.478138Z", + "created_at": "2025-10-01T01:34:06.3373667Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.519952Z", + "created_at": "2025-10-01T01:34:06.532942727Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.561433Z", + "created_at": "2025-10-01T01:34:06.728352251Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.603624Z", + "created_at": "2025-10-01T01:34:06.924985367Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.645851Z", + "created_at": "2025-10-01T01:34:07.121349528Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.688403Z", + "created_at": "2025-10-01T01:34:07.318123626Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.72991Z", + "created_at": "2025-10-01T01:34:07.51621183Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.771635Z", + "created_at": "2025-10-01T01:34:07.715339999Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.813711Z", + "created_at": "2025-10-01T01:34:07.911837801Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.856201Z", + "created_at": "2025-10-01T01:34:08.111752821Z", "done": false, "done_reason": null, "total_duration": 
null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.899048Z", + "created_at": "2025-10-01T01:34:08.31294106Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,15 +238,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:50.94069Z", + "created_at": "2025-10-01T01:34:08.520937013Z", "done": true, "done_reason": "stop", - "total_duration": 688370708, - "load_duration": 107469833, + "total_duration": 4447759914, + "load_duration": 44225114, "prompt_eval_count": 399, - "prompt_eval_duration": 74988334, + "prompt_eval_duration": 2025476521, "eval_count": 13, - "eval_duration": 505216458, + "eval_duration": 2377545768, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/2d187a11704c.json b/tests/integration/recordings/responses/2d187a11704c.json index c0f746ffe..ecce0ec80 100644 --- a/tests/integration/recordings/responses/2d187a11704c.json +++ b/tests/integration/recordings/responses/2d187a11704c.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.566151Z", + "created_at": "2025-10-01T01:35:11.444139198Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.609308Z", + "created_at": "2025-10-01T01:35:11.631417419Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.651314Z", + "created_at": "2025-10-01T01:35:11.837785952Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.693185Z", + "created_at": "2025-10-01T01:35:12.035361735Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.734643Z", + "created_at": "2025-10-01T01:35:12.231459021Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.776343Z", + "created_at": "2025-10-01T01:35:12.437587336Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.81705Z", + "created_at": "2025-10-01T01:35:12.645814233Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.857959Z", + "created_at": "2025-10-01T01:35:12.857399802Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.899424Z", + "created_at": 
"2025-10-01T01:35:13.069748955Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.939218Z", + "created_at": "2025-10-01T01:35:13.275446646Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:56.980065Z", + "created_at": "2025-10-01T01:35:13.472121232Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.02214Z", + "created_at": "2025-10-01T01:35:13.665744046Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.0628Z", + "created_at": "2025-10-01T01:35:13.861581737Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.106061Z", + "created_at": "2025-10-01T01:35:14.057543582Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.1492Z", + "created_at": "2025-10-01T01:35:14.250235864Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.190075Z", + "created_at": "2025-10-01T01:35:14.440950519Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.23178Z", + "created_at": "2025-10-01T01:35:14.633159237Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.272738Z", + "created_at": "2025-10-01T01:35:14.824645544Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,7 +346,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.313855Z", + "created_at": "2025-10-01T01:35:15.015421713Z", "done": false, "done_reason": null, "total_duration": null, @@ -364,7 +364,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.354964Z", + "created_at": "2025-10-01T01:35:15.21010827Z", "done": false, "done_reason": null, "total_duration": null, @@ -382,7 +382,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.395971Z", + "created_at": "2025-10-01T01:35:15.406911964Z", "done": false, "done_reason": null, "total_duration": null, @@ -400,7 +400,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.438471Z", + 
"created_at": "2025-10-01T01:35:15.599086606Z", "done": false, "done_reason": null, "total_duration": null, @@ -418,7 +418,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.479796Z", + "created_at": "2025-10-01T01:35:15.789596143Z", "done": false, "done_reason": null, "total_duration": null, @@ -436,7 +436,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.520641Z", + "created_at": "2025-10-01T01:35:15.981551476Z", "done": false, "done_reason": null, "total_duration": null, @@ -454,7 +454,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.561511Z", + "created_at": "2025-10-01T01:35:16.170823008Z", "done": false, "done_reason": null, "total_duration": null, @@ -472,7 +472,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.602875Z", + "created_at": "2025-10-01T01:35:16.361099362Z", "done": false, "done_reason": null, "total_duration": null, @@ -490,7 +490,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.643406Z", + "created_at": "2025-10-01T01:35:16.554187248Z", "done": false, "done_reason": null, "total_duration": null, @@ -508,7 +508,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.684279Z", + "created_at": "2025-10-01T01:35:16.746364193Z", "done": false, "done_reason": null, "total_duration": null, @@ -526,7 +526,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.725699Z", + "created_at": "2025-10-01T01:35:16.937784556Z", "done": false, "done_reason": null, "total_duration": null, @@ -544,7 +544,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.766658Z", + "created_at": "2025-10-01T01:35:17.130739694Z", "done": false, "done_reason": null, "total_duration": null, @@ -562,7 +562,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.80738Z", + "created_at": "2025-10-01T01:35:17.324485154Z", "done": false, "done_reason": null, "total_duration": null, @@ -580,7 +580,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.848466Z", + "created_at": "2025-10-01T01:35:17.513221988Z", "done": false, "done_reason": null, "total_duration": null, @@ -598,7 +598,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.889056Z", + "created_at": "2025-10-01T01:35:17.704588587Z", "done": false, "done_reason": null, "total_duration": null, @@ -616,7 +616,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:57.931554Z", + "created_at": "2025-10-01T01:35:17.89491876Z", "done": false, "done_reason": null, "total_duration": null, @@ -634,7 +634,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:37:57.974754Z", + "created_at": "2025-10-01T01:35:18.085415685Z", "done": false, "done_reason": null, "total_duration": null, @@ -652,7 +652,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.016978Z", + "created_at": "2025-10-01T01:35:18.291123534Z", "done": false, "done_reason": null, "total_duration": null, @@ -670,7 +670,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.057942Z", + "created_at": "2025-10-01T01:35:18.481091772Z", "done": false, "done_reason": null, "total_duration": null, @@ -688,7 +688,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.099015Z", + "created_at": "2025-10-01T01:35:18.669330853Z", "done": false, "done_reason": null, "total_duration": null, @@ -706,7 +706,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.140531Z", + "created_at": "2025-10-01T01:35:18.862203802Z", "done": false, "done_reason": null, "total_duration": null, @@ -724,7 +724,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.181382Z", + "created_at": "2025-10-01T01:35:19.050586441Z", "done": false, "done_reason": null, "total_duration": null, @@ -742,7 +742,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.223318Z", + "created_at": "2025-10-01T01:35:19.243400941Z", "done": false, "done_reason": null, "total_duration": null, @@ -760,7 +760,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.26358Z", + "created_at": "2025-10-01T01:35:19.438492404Z", "done": false, "done_reason": null, "total_duration": null, @@ -778,7 +778,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.305496Z", + "created_at": "2025-10-01T01:35:19.625091169Z", "done": false, "done_reason": null, "total_duration": null, @@ -796,7 +796,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.347254Z", + "created_at": "2025-10-01T01:35:19.817882725Z", "done": false, "done_reason": null, "total_duration": null, @@ -814,7 +814,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.390044Z", + "created_at": "2025-10-01T01:35:20.006228518Z", "done": false, "done_reason": null, "total_duration": null, @@ -832,7 +832,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.430867Z", + "created_at": "2025-10-01T01:35:20.195451511Z", "done": false, "done_reason": null, "total_duration": null, @@ -850,7 +850,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.471376Z", + "created_at": "2025-10-01T01:35:20.38583856Z", "done": false, "done_reason": null, "total_duration": null, @@ -868,7 +868,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": 
"llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.51208Z", + "created_at": "2025-10-01T01:35:20.574736342Z", "done": false, "done_reason": null, "total_duration": null, @@ -886,7 +886,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.553226Z", + "created_at": "2025-10-01T01:35:20.770260046Z", "done": false, "done_reason": null, "total_duration": null, @@ -904,7 +904,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.594787Z", + "created_at": "2025-10-01T01:35:20.961391185Z", "done": false, "done_reason": null, "total_duration": null, @@ -922,7 +922,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.63466Z", + "created_at": "2025-10-01T01:35:21.15136915Z", "done": false, "done_reason": null, "total_duration": null, @@ -940,7 +940,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.674628Z", + "created_at": "2025-10-01T01:35:21.34012064Z", "done": false, "done_reason": null, "total_duration": null, @@ -958,7 +958,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.714616Z", + "created_at": "2025-10-01T01:35:21.530394237Z", "done": false, "done_reason": null, "total_duration": null, @@ -976,7 +976,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.754906Z", + "created_at": "2025-10-01T01:35:21.721043618Z", "done": false, "done_reason": null, "total_duration": null, @@ -994,7 +994,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.795048Z", + "created_at": "2025-10-01T01:35:21.911611623Z", "done": false, "done_reason": null, "total_duration": null, @@ -1012,7 +1012,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.835297Z", + "created_at": "2025-10-01T01:35:22.100940877Z", "done": false, "done_reason": null, "total_duration": null, @@ -1030,7 +1030,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.875738Z", + "created_at": "2025-10-01T01:35:22.289910353Z", "done": false, "done_reason": null, "total_duration": null, @@ -1048,7 +1048,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.91604Z", + "created_at": "2025-10-01T01:35:22.476827205Z", "done": false, "done_reason": null, "total_duration": null, @@ -1066,7 +1066,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.956596Z", + "created_at": "2025-10-01T01:35:22.663529325Z", "done": false, "done_reason": null, "total_duration": null, @@ -1084,7 +1084,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:58.996664Z", + "created_at": "2025-10-01T01:35:22.851128482Z", "done": false, "done_reason": null, "total_duration": null, @@ -1102,7 +1102,7 @@ "__type__": 
"ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.037796Z", + "created_at": "2025-10-01T01:35:23.042424694Z", "done": false, "done_reason": null, "total_duration": null, @@ -1120,7 +1120,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.078586Z", + "created_at": "2025-10-01T01:35:23.234415016Z", "done": false, "done_reason": null, "total_duration": null, @@ -1138,7 +1138,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.119448Z", + "created_at": "2025-10-01T01:35:23.422767727Z", "done": false, "done_reason": null, "total_duration": null, @@ -1156,7 +1156,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.160318Z", + "created_at": "2025-10-01T01:35:23.611953916Z", "done": false, "done_reason": null, "total_duration": null, @@ -1174,7 +1174,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.201852Z", + "created_at": "2025-10-01T01:35:23.802138602Z", "done": false, "done_reason": null, "total_duration": null, @@ -1192,7 +1192,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.243763Z", + "created_at": "2025-10-01T01:35:23.993446989Z", "done": false, "done_reason": null, "total_duration": null, @@ -1210,7 +1210,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.284948Z", + "created_at": "2025-10-01T01:35:24.186705934Z", "done": false, "done_reason": null, "total_duration": null, @@ -1228,7 +1228,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.325598Z", + "created_at": "2025-10-01T01:35:24.39236955Z", "done": false, "done_reason": null, "total_duration": null, @@ -1246,7 +1246,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.366289Z", + "created_at": "2025-10-01T01:35:24.579916625Z", "done": false, "done_reason": null, "total_duration": null, @@ -1264,7 +1264,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.406764Z", + "created_at": "2025-10-01T01:35:24.768821839Z", "done": false, "done_reason": null, "total_duration": null, @@ -1282,7 +1282,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.447922Z", + "created_at": "2025-10-01T01:35:24.957792215Z", "done": false, "done_reason": null, "total_duration": null, @@ -1300,7 +1300,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.488486Z", + "created_at": "2025-10-01T01:35:25.147895529Z", "done": false, "done_reason": null, "total_duration": null, @@ -1318,7 +1318,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.529Z", + "created_at": "2025-10-01T01:35:25.337348777Z", "done": false, "done_reason": null, 
"total_duration": null, @@ -1336,7 +1336,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.569417Z", + "created_at": "2025-10-01T01:35:25.528043056Z", "done": false, "done_reason": null, "total_duration": null, @@ -1354,7 +1354,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.610542Z", + "created_at": "2025-10-01T01:35:25.720598024Z", "done": false, "done_reason": null, "total_duration": null, @@ -1372,7 +1372,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.651411Z", + "created_at": "2025-10-01T01:35:25.908813849Z", "done": false, "done_reason": null, "total_duration": null, @@ -1390,7 +1390,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.69241Z", + "created_at": "2025-10-01T01:35:26.102538985Z", "done": false, "done_reason": null, "total_duration": null, @@ -1408,7 +1408,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.732339Z", + "created_at": "2025-10-01T01:35:26.296587284Z", "done": false, "done_reason": null, "total_duration": null, @@ -1426,7 +1426,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.772462Z", + "created_at": "2025-10-01T01:35:26.48997969Z", "done": false, "done_reason": null, "total_duration": null, @@ -1444,7 +1444,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.812507Z", + "created_at": "2025-10-01T01:35:26.68461717Z", "done": false, "done_reason": null, "total_duration": null, @@ -1462,7 +1462,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.852762Z", + "created_at": "2025-10-01T01:35:26.877976002Z", "done": false, "done_reason": null, "total_duration": null, @@ -1480,7 +1480,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.892984Z", + "created_at": "2025-10-01T01:35:27.071304424Z", "done": false, "done_reason": null, "total_duration": null, @@ -1498,7 +1498,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.933555Z", + "created_at": "2025-10-01T01:35:27.267083009Z", "done": false, "done_reason": null, "total_duration": null, @@ -1516,7 +1516,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:59.973778Z", + "created_at": "2025-10-01T01:35:27.458752902Z", "done": false, "done_reason": null, "total_duration": null, @@ -1534,7 +1534,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.014923Z", + "created_at": "2025-10-01T01:35:27.651757232Z", "done": false, "done_reason": null, "total_duration": null, @@ -1552,7 +1552,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.057464Z", + "created_at": 
"2025-10-01T01:35:27.84093711Z", "done": false, "done_reason": null, "total_duration": null, @@ -1570,7 +1570,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.09902Z", + "created_at": "2025-10-01T01:35:28.031166547Z", "done": false, "done_reason": null, "total_duration": null, @@ -1588,7 +1588,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.140492Z", + "created_at": "2025-10-01T01:35:28.222014814Z", "done": false, "done_reason": null, "total_duration": null, @@ -1606,7 +1606,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.180239Z", + "created_at": "2025-10-01T01:35:28.412024854Z", "done": false, "done_reason": null, "total_duration": null, @@ -1624,7 +1624,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.220364Z", + "created_at": "2025-10-01T01:35:28.603242201Z", "done": false, "done_reason": null, "total_duration": null, @@ -1642,7 +1642,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.26097Z", + "created_at": "2025-10-01T01:35:28.793015428Z", "done": false, "done_reason": null, "total_duration": null, @@ -1660,7 +1660,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.301228Z", + "created_at": "2025-10-01T01:35:28.98105341Z", "done": false, "done_reason": null, "total_duration": null, @@ -1678,7 +1678,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.341631Z", + "created_at": "2025-10-01T01:35:29.171562052Z", "done": false, "done_reason": null, "total_duration": null, @@ -1696,7 +1696,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.383006Z", + "created_at": "2025-10-01T01:35:29.359960218Z", "done": false, "done_reason": null, "total_duration": null, @@ -1714,7 +1714,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.423509Z", + "created_at": "2025-10-01T01:35:29.547663965Z", "done": false, "done_reason": null, "total_duration": null, @@ -1732,7 +1732,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.464702Z", + "created_at": "2025-10-01T01:35:29.737967784Z", "done": false, "done_reason": null, "total_duration": null, @@ -1750,7 +1750,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.505914Z", + "created_at": "2025-10-01T01:35:29.926196503Z", "done": false, "done_reason": null, "total_duration": null, @@ -1768,7 +1768,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.546505Z", + "created_at": "2025-10-01T01:35:30.117904197Z", "done": false, "done_reason": null, "total_duration": null, @@ -1786,7 +1786,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:38:00.587839Z", + "created_at": "2025-10-01T01:35:30.309146475Z", "done": false, "done_reason": null, "total_duration": null, @@ -1804,15 +1804,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:00.629018Z", + "created_at": "2025-10-01T01:35:30.497677975Z", "done": true, "done_reason": "stop", - "total_duration": 4303339291, - "load_duration": 156231250, + "total_duration": 21228194411, + "load_duration": 46730034, "prompt_eval_count": 36, - "prompt_eval_duration": 81909875, + "prompt_eval_duration": 2125755306, "eval_count": 100, - "eval_duration": 4064559292, + "eval_duration": 19055134812, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/3130f21f1bb9.json b/tests/integration/recordings/responses/3130f21f1bb9.json new file mode 100644 index 000000000..d4dcf6aea --- /dev/null +++ b/tests/integration/recordings/responses/3130f21f1bb9.json @@ -0,0 +1,3131 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "Python is a high-level programming language that emphasizes code readability and allows programmers to express concepts in fewer lines of code than would be possible in languages such as C++ or Java.", + "Machine learning is a subset of artificial intelligence that enables systems to automatically learn and improve from experience without being explicitly programmed, using statistical techniques to give computer systems the ability to progressively improve performance on a specific task.", + "Data structures are fundamental to computer science because they provide organized ways to store and access data efficiently, enable faster processing of data through optimized algorithms, and form the building blocks for more complex software systems.", + "Neural networks are inspired by biological neural networks found in animal brains, using interconnected nodes called artificial neurons to process information through weighted connections that can be trained to recognize patterns and solve complex problems through iterative learning." 
+ ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.003090947, + 0.09604761, + -0.11840379, + -0.092462674, + 0.06473318, + 0.013984173, + -0.0453576, + 0.0036970088, + -0.02222872, + -0.051683415, + 0.0003385266, + 0.023853302, + 0.043623973, + -0.020216433, + 0.009333161, + -0.08589091, + 0.0102010295, + -0.050277237, + 0.013096318, + 0.070338726, + -0.0044037374, + -0.04049156, + 0.027865507, + -0.030463468, + 0.06956409, + 0.016478432, + -0.01048117, + -0.04063368, + -0.012503031, + 0.02518871, + -0.036050968, + -0.019599508, + 0.0072585195, + -0.033797707, + -0.055228572, + -0.04808135, + 0.048784044, + 0.007958744, + 0.05235575, + 0.0155341895, + -0.039142516, + 0.014507955, + 0.02470678, + -0.02759484, + 0.08697875, + -0.047769055, + 0.029249318, + -0.04448267, + -0.029990533, + -0.030334929, + -0.008363074, + -0.07003726, + -0.037667923, + 0.0026686124, + 0.101092435, + 0.053792834, + 0.0069262264, + 0.023978552, + 0.02889155, + -0.03792439, + 0.09474232, + 0.07994058, + -0.068739556, + 0.052854076, + 0.040114164, + -0.0031523013, + -0.03227859, + 0.028844943, + -0.026357155, + -0.011060798, + 0.020999193, + -0.07089094, + 0.026845012, + -0.015627025, + -0.04613553, + -0.011963311, + -0.020483695, + -0.026694208, + 0.0148264915, + 0.065035485, + -0.0022104725, + -0.016194746, + -0.0208957, + 0.037690002, + 0.033658814, + -0.05529406, + -0.034939546, + 6.913827e-05, + -0.036353707, + 0.047993362, + -0.05729234, + -0.009336094, + 0.012104476, + 0.00092687964, + -0.069908544, + 0.021848856, + -0.01802717, + 0.013347229, + 0.031699587, + -0.030859077, + -1.687288e-06, + -0.010270364, + 0.04771742, + -0.051177908, + 0.033818368, + 0.04920404, + 0.01666294, + -0.033314653, + -0.046947327, + -0.0075336993, + 0.011538041, + 0.043432906, + -0.047548775, + -0.032091845, + -0.054206308, + 0.01632687, + 0.08829971, + -0.03389563, + 0.044160683, + 0.0563715, + 0.014417741, + -0.016173586, + 0.035288636, + 0.055231627, + 0.02842211, + 0.028187707, + -0.04426007, + 0.05323493, + -0.012233036, + -0.05448969, + 0.031235894, + -0.0009951439, + -0.050905637, + -0.006768993, + -0.030966967, + 0.067565106, + -0.058782987, + -0.020246718, + 0.062599055, + 0.002883254, + 0.028725693, + 0.020061154, + -0.023027781, + -0.012063589, + 0.038898543, + -0.023685627, + -0.0071144463, + 0.031448044, + 0.012175329, + 0.0257892, + 0.026001925, + 0.049877577, + 0.0021397287, + 0.004722688, + -0.008280793, + 0.006610069, + 0.035600357, + 0.0057330946, + 0.04715625, + 0.0104579665, + 0.06878401, + 0.032636765, + -0.045692537, + 0.027380036, + -0.02896107, + -0.029047707, + 0.014847608, + -0.011170206, + -0.030609459, + -0.00029586494, + -0.043504786, + -0.04351318, + 0.016228631, + -0.0018337993, + 0.0074679446, + -0.013748864, + 0.022429049, + -0.0375771, + 0.042493116, + -0.021883924, + -0.012697342, + -0.04706134, + 0.044902463, + 0.04387019, + -0.055043343, + 0.014316774, + 0.020061927, + -0.042015336, + -0.016192857, + -0.030242536, + -0.014330689, + 0.02923408, + -0.02710425, + -0.04271568, + -0.03355069, + -0.026888834, + 0.0047209496, + -0.0056308866, + 0.047076028, + -0.06260847, + -0.042926077, + -0.033471134, + -0.0420381, + 0.014255864, + -0.040093295, + 0.036077272, + -0.017827978, + 0.010296059, + -0.043022502, + 0.008587915, + 0.08664976, + -0.00020295857, + -0.017598357, + 
0.06415218, + 0.0058186534, + -0.035194118, + -0.030805245, + -0.019902973, + -0.011155231, + 0.019659974, + 0.06168094, + -0.03935558, + 0.0058380696, + 0.008744179, + 0.014075224, + 0.019879585, + -0.06612642, + 0.021474052, + -0.05057089, + 0.0067976415, + -0.014930689, + -0.039542083, + 0.03057139, + 0.024985412, + 0.019986767, + 0.041225713, + 0.038953424, + 0.042473312, + -0.0012347505, + -0.028306473, + 0.0068447716, + -0.0060466137, + -0.007780399, + 0.031249423, + -0.033626, + 0.017350428, + -0.003920609, + -0.02308066, + -0.013918898, + 0.03348771, + -0.022070652, + -0.0311343, + 0.004665898, + 0.05681535, + 0.033781994, + 0.029855534, + 0.055623304, + 0.0037308626, + 0.032435834, + -0.01548921, + 0.051779583, + -0.03348033, + -0.027463121, + -0.016725047, + -0.022375818, + 0.012979877, + -0.00022387852, + 0.0060666804, + 0.0034318524, + -0.03092084, + 0.02341147, + 0.023289494, + 0.021310503, + -0.045035034, + -0.003533924, + -0.021152453, + 0.021689946, + -0.044476595, + 0.009260065, + 0.009512747, + 0.031830043, + -0.035532735, + -0.040821794, + 0.028622385, + 0.0003955203, + 0.03296935, + 0.017467853, + 0.011803479, + 0.005080811, + -0.025084332, + 0.069132484, + -0.023703001, + -0.03957126, + -0.043329764, + -0.011744362, + 0.04279272, + 0.07370136, + 0.015284943, + 0.03391219, + -0.03261106, + -0.028988473, + 0.06003438, + 0.08163386, + 0.037571035, + 0.020090902, + -0.01987498, + 0.025182985, + 0.0016644186, + -0.021142084, + -0.011045582, + -0.04523148, + 0.035729762, + -0.04577271, + 0.066968985, + -0.08435172, + 0.03305286, + 0.013549899, + 0.025869401, + 0.043451995, + -0.030745648, + 0.0010823214, + -0.08180061, + 0.040454637, + -0.028382152, + 0.009892922, + 0.049347524, + -0.007337878, + 0.012099656, + -0.03163371, + -0.052415583, + 0.009677461, + 0.009352584, + 0.013957565, + -0.019746099, + -0.074012175, + -0.0030700697, + 0.02775875, + -0.017766705, + 0.026490878, + 0.0033631313, + 0.035369392, + -0.04432113, + 0.017871099, + -0.050520398, + 0.0011422632, + 0.008406398, + 0.033428602, + -0.046777137, + 0.042452376, + 0.0273346, + -0.003995728, + 0.037445698, + -0.024369251, + -0.02828132, + -0.0030712776, + -0.04018031, + 0.025428733, + -0.005815698, + -0.022197451, + 0.00620749, + 0.030668877, + 0.0035744372, + 0.028039407, + -0.059336178, + 0.0015513424, + 0.0006978681, + 0.02373031, + -0.019448636, + -0.012421107, + -0.0056262217, + -0.040361527, + -0.04692492, + -0.012687595, + 0.006593882, + -0.0041717407, + -0.03117893, + -0.068955414, + -0.020455334, + -0.009882477, + 0.00793095, + 0.024907323, + -0.053882554, + -0.035952404, + 0.00774612, + 0.021623546, + -0.060584284, + 0.0008677752, + -0.004447187, + 0.032608233, + 0.033415746, + 0.037971195, + -0.04416349, + -0.030293355, + 0.024735263, + 0.050290417, + -0.026328063, + 0.025719365, + 0.016626138, + -0.044612437, + -0.003098227, + -0.047689714, + -0.07156968, + 0.01989559, + -0.011833882, + -0.02977814, + -0.0035325778, + 0.009505919, + -0.024347162, + 0.016585112, + -0.024012927, + -0.0023020753, + 0.013682231, + 0.019170996, + -0.015666388, + -0.033047408, + 0.053364336, + 0.02001459, + 0.034338653, + -0.048730344, + 0.013365634, + 0.018888196, + 0.05630122, + -0.00662485, + 0.012007138, + 0.018249286, + 0.022746533, + 0.02860551, + 0.057509553, + 0.01917473, + -0.067357, + 0.009858217, + 0.0396155, + 0.037449677, + 0.027316686, + -0.003741414, + -0.0004973098, + 0.02991219, + 0.014136339, + -0.028230866, + 0.06657123, + 0.032783315, + -0.03101118, + -0.06064414, + 0.004188821, + 0.022631776, + 
0.059042003, + 0.06876, + -0.012206267, + -0.0821691, + 0.022086529, + -0.0072288415, + 0.013867353, + 0.0091591915, + 0.00805788, + 0.045439675, + 0.017412364, + -0.008539732, + 0.0045926417, + -0.025433894, + 0.04361251, + -0.0047451644, + 0.00017663927, + -0.06020522, + 0.024841757, + -0.00026000594, + 0.008635995, + -0.009238347, + -0.012046931, + -0.0010463385, + 0.041900307, + -0.028666915, + 0.037059262, + 0.028481482, + -0.012526489, + -0.0055596284, + -0.024260871, + -0.011554422, + 0.03115736, + 0.03714331, + 0.024052016, + -0.01083798, + -0.030802228, + -0.048096277, + -0.01104405, + -0.0049294434, + 0.022385463, + -0.008944233, + 0.0026380213, + -0.023794232, + -0.048210252, + 0.03202458, + 0.04057014, + 0.0531768, + 0.016310908, + -0.039813325, + -0.05208368, + -0.014054222, + 0.094533496, + 0.07642529, + 0.025715023, + 0.028485976, + 0.02768392, + -0.025633201, + -0.0029767978, + 0.06410617, + -0.029699529, + 0.059712842, + -0.053882755, + -0.043304577, + 0.02225193, + 0.034443524, + 0.006656706, + -0.011267327, + 0.049484365, + 0.05220316, + -0.02691971, + 0.023881223, + -0.022981929, + -0.09593904, + 0.018707242, + 0.016387459, + -0.024498131, + -0.0068502496, + -0.026733112, + -0.03909302, + 0.037554115, + 0.014788388, + -0.011440841, + -0.00028370088, + -0.010407865, + 0.041494798, + -0.0059260563, + -0.040287785, + -0.025351562, + -0.059843395, + -0.056114774, + -0.06655903, + 0.056252357, + 0.021331474, + -0.001166095, + 0.06491203, + 0.050037753, + 0.0033837704, + 0.020583183, + 0.06599941, + 0.005478397, + -0.022636946, + -0.00044582508, + 0.011203095, + -0.05957346, + 0.044482667, + -0.04590922, + 0.0013798112, + -0.033329614, + 0.025112469, + -0.02123516, + 0.00025512607, + -0.027879294, + 0.013120379, + -0.048738264, + -0.03624769, + 0.036045056, + 0.025573866, + 0.023047429, + 0.025920672, + 0.016882492, + -0.02279409, + -0.02317234, + -0.0040101693, + 0.060752228, + -0.040337354, + -0.05460929, + 0.0198172, + 0.022455717, + 0.012135278, + 0.002002113, + 0.017909495, + -0.0153429555, + -0.050094794, + -0.026103504, + 0.060342155, + -0.0285984, + -0.013253505, + 0.04859142, + -0.03881282, + -0.014088534, + -0.016100964, + 0.012022445, + -0.01684563, + -0.027013376, + -0.014015188, + -0.004543662, + -0.023600634, + -0.005541604, + 0.0075320834, + 0.023768572, + -0.059007607, + -0.037556786, + -0.01778341, + -0.06213497, + -1.4281669e-05, + 0.0071058916, + 0.035102, + -0.042220693, + 0.024100045, + 0.09466793, + -0.031069918, + 0.046927627, + -0.04166753, + -0.023964025, + 0.040654592, + 0.0309336, + -0.016093053, + -0.00029172184, + 0.0057314406, + -0.060659353, + 0.048662484, + -0.0007095928, + 0.012155295, + -0.029255588, + -0.029109525, + -0.05350515, + 0.05714772, + -0.041150652, + 0.043109175, + 0.0009024791, + -0.023951774, + 0.027793754, + 0.05562148, + 0.06399012, + -0.058591112, + 0.0069887685, + -0.037780132, + 0.029130891, + -0.0089229075, + 0.0013858108, + -0.03863276, + 0.0019716322, + 0.046890926, + 0.0874699, + 0.019922499, + -0.05109738, + 0.027648486, + 0.00987546, + 0.0029350575, + -0.03160173, + 0.037278082, + 0.07510668, + 0.007423074, + -0.047842957, + 0.06636329, + 0.05289521, + -0.0010001262, + 0.01971588, + -0.0074665854, + 0.008849992, + 0.06130543, + -0.023203438, + -0.066689104, + -0.00826479, + 0.0010215435, + -0.002183026, + -0.021711286, + 0.041641667, + 0.039001487, + 0.04480901, + 0.0008162, + 0.0019801676, + -0.08664479, + -0.0024015156, + 0.018281285, + 0.002742708, + -0.001846643, + -0.02501251, + 0.005773928, + 0.047037184, + 
-0.0038052397, + -0.01996088, + -0.043526832, + -0.02497972, + 0.013066086, + -0.009926004, + -0.009117636, + -0.03091159, + 0.020381417, + -0.048431884, + 0.021292195, + -0.04605411, + -0.062775806, + -0.065336205, + -0.03168914, + -0.021132536, + 0.024628565, + -0.047913622, + 0.027086733, + 0.0014576988, + -0.013014333, + -0.016274815, + 0.0027481033, + 0.06521211, + -0.014618258, + 0.011080098, + 0.03910298, + 0.038535718, + -0.01819429, + 0.0075649046, + 0.024294391, + 0.048159268, + -0.036184233, + -0.052870464, + -0.04117243, + 0.02658233, + 0.0373725, + 0.067497686, + -0.002039666, + 0.04371207, + -0.047288615, + -0.061389018, + -0.05991368, + -0.001503112, + 0.054956224, + -0.018673347, + -0.01878792, + 0.014894865, + 0.0054442305, + -0.005585625, + 0.015543309, + -0.0489046, + 0.02444715, + 0.015062179, + 0.034169022, + 0.022409236, + -0.057436798, + 0.042047292, + -0.039522476, + 0.018624678, + -0.035853356, + -0.035035174, + -0.07487606, + 0.006371521, + 0.030847441, + 0.050054766, + -0.0068717157, + 0.0412162, + -0.0009972106, + -0.03751093, + -0.032882456, + 0.049063325, + 0.0363597, + -0.0435322, + -0.00644647, + -0.010058214, + -0.03934986, + 0.07194581, + -0.013095484, + 0.015656278, + -0.005050425, + 0.072323844, + 0.056736372, + -0.0021469446, + 0.012176674, + -0.008620731, + 0.010838642, + -0.03625522, + -0.04454152, + -0.007512609, + -0.053434398, + -0.024375373 + ], + "index": 0, + "object": "embedding" + }, + { + "embedding": [ + 0.0093245255, + 0.037020646, + -0.15275846, + -0.039018434, + 0.05615867, + 0.019716505, + 0.040707525, + -0.0016290393, + -0.037260506, + 0.0040282393, + 0.011403119, + 0.049958482, + 0.14523987, + 0.04678009, + -0.022403825, + -0.02939822, + -0.047135856, + -0.042446245, + -0.016692566, + 0.021995345, + 0.009028183, + -0.0067151533, + 0.014182877, + 0.01290824, + 0.036767777, + 0.025258692, + -0.041439414, + -0.047470998, + 0.013928222, + 0.037319552, + 0.010282564, + -0.061294544, + 0.0343252, + -0.016851913, + -0.07322739, + -0.039828923, + 0.07597111, + 0.009395966, + 0.03197832, + 0.018252423, + -0.025390154, + 0.029811395, + 0.019995535, + 0.013386904, + 0.049264256, + 0.024902813, + 0.0042558494, + -0.033679035, + 0.022450514, + -0.00937979, + 0.047814894, + -0.048913524, + 0.014945698, + 0.048196375, + 0.09089787, + 0.02406028, + -0.009449359, + 0.035176005, + -0.003615816, + -0.055852853, + 0.15740943, + 0.021552045, + -0.07463581, + 0.08465411, + 0.009901923, + -0.0015639447, + -0.02050741, + 0.03975716, + -0.001861341, + -0.0010024293, + 0.0067345276, + -0.022124752, + 0.0017578524, + 0.029929232, + -0.04326069, + -0.009592429, + -0.03115974, + -0.01987962, + -0.009464124, + 0.06323683, + 0.060557403, + -0.05530454, + 0.03876498, + -0.022195175, + 0.051614936, + 0.0026718706, + -0.068879806, + -0.021950895, + -0.039826524, + 0.111891806, + 0.016034095, + 0.042541582, + 0.028269166, + 0.007713899, + -0.054541785, + 0.012631863, + -0.034623574, + 0.01539877, + -0.0402728, + -0.016335228, + -0.047618385, + -0.009332856, + 0.030080792, + -0.060409877, + 0.044823535, + 0.060680836, + -0.029626874, + -0.013954677, + -0.009220117, + 0.03483868, + 0.00037684178, + 0.05157052, + -0.028470146, + -0.006076354, + -0.07370837, + -0.040964562, + 0.052686464, + -0.0010079364, + 0.017319629, + -0.0030558787, + 0.018884663, + -0.018591058, + -0.042040937, + 0.0056352047, + 0.0052988734, + 0.08985566, + -0.048688963, + 0.003959538, + -0.0073859375, + -0.03349454, + 0.020888774, + -0.013648461, + -0.036276914, + -0.00889212, + -0.0029556719, + 
0.11167381, + -0.029314028, + -0.046929546, + 0.030574305, + 0.054464515, + 0.017300002, + -0.0028822748, + -0.007059641, + -0.007018886, + 0.020453785, + -0.022019796, + 0.027801864, + 0.03007795, + -0.049766764, + 0.037184987, + -0.0040109023, + 0.06102339, + -0.041503135, + -0.04510763, + 0.009217179, + 0.007659363, + -0.031119471, + -0.0041486067, + 0.048159894, + 0.009898165, + 0.029883144, + 1.4485938e-05, + -0.020938009, + 0.0075253425, + -0.039013185, + -0.016228665, + 0.01714668, + 0.040588617, + 0.043694753, + 0.009124086, + -0.046304006, + 0.0031405657, + 0.013402954, + 0.014587735, + 0.04041461, + 0.0093977805, + 0.051957473, + -0.05709989, + 0.03600369, + -0.05006624, + 0.021610659, + -0.037959095, + 0.024283256, + 0.0048661674, + -0.025518768, + -0.010449195, + -0.008590603, + 0.016784025, + -0.024047092, + -0.057893563, + -0.00787648, + -0.0056437235, + -0.012347517, + -0.041244365, + -0.06496264, + -0.015397793, + 0.016185174, + -0.0081507275, + 0.04797402, + -0.04418742, + 0.0075834817, + -0.030680092, + -0.073421605, + -0.006560622, + -0.026873987, + 0.04554698, + 0.043217268, + -0.0030417389, + -0.013746721, + 0.044227745, + 0.06898634, + 0.033688527, + 0.015968256, + -0.017101463, + 4.6322406e-05, + -0.010614815, + -0.027202137, + 0.0044153146, + 0.015001771, + -0.025231807, + 0.017586673, + -0.016993087, + 0.00016057934, + 0.00918556, + 0.001865834, + -0.013132488, + -0.020118512, + 0.0064147087, + -0.036133893, + 0.05339043, + -0.027853882, + -0.07504275, + 0.07823152, + 0.004424533, + 0.019923503, + -0.0023546969, + 0.012785957, + 0.0408715, + 0.005607736, + 0.059096873, + -0.0031324262, + 0.042175602, + -0.046861377, + -0.013041484, + -0.059123434, + -0.017823974, + 0.024541097, + -0.028629845, + -0.01231504, + 0.014271066, + -0.0024197495, + 0.043298703, + -0.0035040171, + -0.033378445, + 0.043341734, + -0.035771772, + -0.011224461, + -0.0025649173, + 0.013266323, + 0.023559095, + 0.04528574, + -0.012232341, + 0.041650575, + -0.023827018, + 0.026528109, + -0.025912467, + -0.009457015, + 0.030885559, + 0.00508413, + 0.011302803, + 0.019581333, + 0.031124663, + 0.043074433, + -0.014444246, + 0.00043950108, + 0.0053879125, + -0.013675915, + -0.0013934845, + 0.007200696, + -0.0058096065, + -0.036498114, + -0.053479876, + -0.059405014, + -0.013652843, + -0.014175657, + 0.004233997, + 0.0331408, + 0.018059615, + 0.023540152, + 0.017002555, + 0.030605104, + -0.029103186, + -0.016021432, + -0.022441352, + -0.015525735, + 0.036115427, + 0.071785465, + 0.03213885, + 0.031858843, + -0.03609922, + -0.02211658, + 0.03137403, + 0.05064348, + -0.009311132, + 0.008374338, + -0.0030512083, + -0.0013003871, + -0.017440137, + 0.008430136, + -0.031068781, + -0.061828244, + -0.0005138882, + -0.020554032, + 0.015898706, + -0.02284647, + -0.0037570924, + -0.018994445, + 0.029730799, + 0.025522925, + -0.021349328, + 0.016261058, + -0.06793578, + -0.04652047, + -0.011446559, + 0.032109052, + 0.044868983, + -0.021103615, + 0.0016362354, + -0.027130213, + -0.008456837, + 0.04900622, + 0.045049977, + -0.017868036, + -0.027128046, + -0.067157134, + -0.011682388, + 0.016103556, + -0.0077392915, + 0.0029228136, + 0.026761508, + 0.052925434, + -0.018473348, + -0.028827662, + -0.02461206, + -0.0065369527, + 0.026928715, + -0.03324631, + -0.024081169, + 0.029017812, + 0.02071607, + -0.011475426, + 0.005307389, + -0.011571068, + 0.0015733382, + 0.023515893, + -0.0029607431, + 0.013698769, + 0.041067895, + 0.02487061, + -0.0026149799, + 0.035429507, + -0.03970223, + 0.0068344646, + -0.030429753, + 
-0.004380877, + -0.009994052, + 0.053399317, + -0.0010140841, + 0.02292136, + 0.0022311974, + 0.022894353, + 0.007466015, + -0.036959704, + 0.047222514, + -0.028948285, + 0.006194667, + -0.06982458, + -0.009092363, + -0.021758143, + -0.01981225, + -0.031105403, + 0.0144788055, + -0.021151582, + -0.004192275, + 0.05543094, + -0.0022040652, + -0.006517331, + -0.01685621, + -0.0013394988, + 0.03680351, + -0.00096560386, + -0.019486453, + -0.054713782, + 0.020746361, + -0.003185628, + -0.0114257885, + 0.008769883, + 0.005613104, + 0.021872899, + 0.028670345, + -0.021123279, + -0.031985007, + 0.010203381, + -0.011448128, + -0.013718579, + 0.020098874, + -0.030820787, + -0.013415337, + 0.037591003, + 0.013922949, + 0.024146594, + 0.0070229536, + -0.0018689213, + -0.05856467, + 0.01674269, + -0.02001378, + 0.03841721, + 0.027468543, + -0.06941817, + 0.030009644, + 0.0011426784, + 0.00953964, + -0.006994295, + 0.01284643, + -0.025263516, + 0.009963703, + 0.022037242, + 0.06309938, + 0.00735522, + -0.07995197, + 0.027594607, + -0.011367537, + -0.024657212, + -0.02510339, + -0.015770642, + 0.01773516, + 0.008827416, + 0.012059225, + 0.0023088488, + 0.05050483, + 0.04500924, + -0.03049868, + -0.056825154, + 0.001529503, + 0.022069085, + 0.10531091, + 0.049558576, + -0.002328827, + -0.112704284, + 0.055938598, + -0.03194784, + 0.014782691, + 0.033694178, + 0.0063839774, + 0.068916574, + -0.022501256, + -0.044051528, + 0.0036021087, + 0.031241383, + 0.029762296, + 0.021401146, + 0.008787494, + -0.07336343, + 0.024864858, + -0.012231658, + 0.007604965, + 0.0026919795, + -0.028528215, + -0.0003819639, + 0.09918798, + -0.01552715, + 0.042090885, + 0.04863421, + -0.017187787, + 0.0010847711, + 0.0028207442, + -0.025932025, + -0.029571703, + 0.058376424, + 0.059427686, + 0.017944148, + -0.09262087, + -0.010741885, + -0.055742923, + -0.02393492, + 0.0129495235, + 0.019577857, + -4.6359088e-05, + -0.0002931635, + -0.0349463, + 0.026407348, + 0.028792545, + 0.010096559, + -0.03485205, + -0.033645257, + -0.040398862, + -0.06670086, + 0.03226899, + 0.032771114, + -0.01653104, + -0.018478092, + 0.053559817, + -0.011644564, + -5.3669213e-05, + -0.014113438, + -0.017209353, + 0.04424602, + -0.09492333, + -0.07200167, + 0.09117658, + -0.010002326, + 0.003501061, + 0.022046536, + 0.068746924, + 0.011795792, + -0.06277398, + 0.032998886, + 0.046990275, + -0.01798326, + -0.0020059661, + 0.0454271, + 0.023868166, + -0.031513233, + -0.006265176, + -0.062364977, + -0.017524943, + 0.01076548, + -0.022577569, + 0.03853864, + 0.006597602, + 0.08020667, + -0.001134649, + -0.0017109414, + -0.04024732, + -0.038222782, + 0.0122661255, + -0.002929228, + 0.036991615, + 0.033264674, + 0.030700099, + 0.031671878, + 0.009365578, + 0.005706133, + -0.06333692, + 0.03199222, + 0.015824173, + -0.025739605, + 0.035910852, + 0.01947545, + -0.08464693, + 0.0036003182, + -0.05398591, + -0.00021602986, + -0.033240035, + 0.025206719, + 0.0038602054, + -0.028930863, + -0.032232255, + -0.006284008, + -0.030168863, + -0.015249662, + 0.011376491, + 0.07199718, + -0.012426832, + -0.017788382, + 0.009426625, + -0.008828723, + -0.01003789, + 0.027800059, + 0.055750176, + 0.026687961, + -0.038412776, + 0.011075051, + 0.020443255, + -0.01534028, + -0.037537303, + 0.010854493, + 0.00034301533, + -0.053437542, + -0.06475626, + 0.056774616, + -0.055306915, + -0.008023826, + -0.011753992, + 0.014524239, + -0.0067454968, + -0.08453447, + 0.030588787, + 0.021832015, + -0.011673041, + -0.020679984, + 0.013251596, + -0.013768357, + -0.06051844, + -0.02935452, 
+ 0.020162996, + -0.037135623, + -0.039756987, + -0.0012803585, + -0.045267165, + -0.016591255, + -0.0095577175, + 0.01816317, + -0.004656964, + 0.009891947, + 0.09686123, + -0.009047401, + 0.04441379, + 0.030881783, + -0.008660555, + -0.03175654, + 0.015524616, + -0.012787256, + 0.012635331, + 0.04635218, + -0.023316002, + 0.030894702, + -0.06904067, + -0.038113616, + -0.03105733, + -0.06713498, + -0.04352835, + 0.07463982, + -0.039180443, + 0.014423453, + -0.0138991205, + 0.002304632, + -0.026797185, + 0.046242025, + 0.038676746, + -0.06316837, + 0.026809318, + -0.03561769, + -0.022187576, + -0.05402242, + 0.014213004, + -0.018501688, + 0.021722514, + 0.024766516, + 0.072815225, + 0.00046832484, + -0.017296348, + -0.0372928, + 0.004340185, + 0.04115723, + -0.023918534, + 0.054117117, + 0.08087816, + 0.014544625, + -0.01190335, + 0.02659143, + 0.05491329, + 0.032358818, + -0.012098936, + -0.04303043, + 0.04448981, + 0.012310984, + -0.0241536, + 0.029603016, + -0.050989088, + -0.028680546, + -0.009174626, + -0.00062233716, + -0.012195833, + 0.047890197, + -0.025283357, + -0.03110058, + -0.017887974, + -0.05515267, + -0.06324735, + 0.036425985, + 0.0067124036, + 0.04024804, + -0.034627836, + -0.008010907, + 0.038717482, + 0.0087442035, + 0.02849219, + -0.03953373, + -0.026028346, + -0.047877103, + -0.013296234, + 0.038786545, + -0.038865823, + -0.002800321, + -0.027000545, + 0.01880298, + -0.032667033, + 0.0016585434, + -0.07333883, + -0.010135463, + -0.044739705, + 0.0025542916, + -0.01182256, + -0.025548426, + 0.04039957, + -0.00538747, + 0.028974304, + 0.0620915, + 0.057959843, + -0.031026581, + 0.02820788, + -0.0018781021, + 0.03305192, + -0.042720795, + -0.019136827, + -0.016491875, + 0.0153581435, + -0.024703098, + -0.026549935, + -0.03919062, + -0.0061582318, + -0.04027008, + 0.06689507, + -0.048648667, + 0.0027749157, + 0.019460328, + -0.021952484, + -0.03920368, + 0.043874845, + 0.035227075, + 0.00050708227, + -0.028798986, + -0.010921614, + -0.03460011, + -0.032910287, + 0.03575106, + -0.057257373, + 0.008827229, + -6.677861e-05, + 0.026294341, + -0.004256348, + -0.03372479, + 0.050080862, + -0.017295398, + -0.01863417, + -0.040255852, + -0.0041076206, + -0.06634954, + 0.0026297811, + -0.0029651944, + 0.028690115, + 0.050920658, + -0.003802487, + 0.019519106, + -0.010920629, + -0.008953767, + 0.04096082, + 0.013585407, + -0.026391802, + -0.022688387, + -0.015385721, + -0.058970373, + 0.023268297, + -0.028552901, + 0.0433965, + -0.02365681, + 0.05893179, + 0.13265237, + -0.013373229, + 0.032411925, + -0.049168058, + 0.030531129, + -0.019705787, + -0.041768335, + 0.028881814, + -0.04144874, + -0.008257591 + ], + "index": 1, + "object": "embedding" + }, + { + "embedding": [ + 0.047196038, + 0.091142215, + -0.1597772, + -0.071980886, + 0.056181777, + -0.013574952, + 0.019645968, + -0.002229554, + -0.06470401, + -0.07946628, + 0.005811743, + 0.026315752, + 0.08416122, + -0.010945363, + -0.021314582, + 0.00079418987, + -0.077663176, + -0.028869387, + 0.020390352, + 0.02529034, + -0.009494531, + -0.033271216, + 0.02107692, + -0.019727936, + 0.030555207, + 0.06428749, + 0.02047115, + -0.037003648, + -0.0073746303, + 0.039292663, + 0.046648905, + -0.0016168942, + 0.04544661, + -0.03287251, + -0.06026098, + -0.072457686, + -0.0543314, + 0.0030291956, + 0.026706785, + -0.039102606, + 0.0014001783, + 0.013308768, + -0.020474184, + -0.027642239, + 0.056315504, + -0.0110963825, + 0.0038216838, + -0.0715681, + 0.057043735, + -0.02925203, + 0.028970603, + -0.014273903, + 0.014560466, + 
0.022202523, + 0.083961904, + 0.035574052, + -0.0067049107, + 0.05092665, + 0.07913678, + -0.050428323, + 0.103278175, + 0.13400482, + -0.04718957, + 0.02196696, + 0.04658032, + -0.013099539, + -0.015067284, + 0.047082856, + -0.022273533, + -0.031628273, + 0.030090977, + 0.0017626628, + 0.016243754, + -0.021831565, + -0.04281829, + 0.010177228, + -0.009490942, + 0.02398183, + -0.03195164, + 0.05142606, + 0.05562375, + -0.021397453, + 0.046833977, + -0.023156704, + 0.02481665, + -0.018685648, + -0.052793, + 0.0057367384, + 0.0036868926, + 0.05987065, + -0.04860744, + 0.009424155, + 0.036160514, + 0.03268708, + -0.08120845, + 0.015565214, + 0.0065461453, + 0.009595294, + -0.035419293, + -0.04015081, + -0.012359314, + -0.020797476, + 0.015938926, + 0.011375911, + 0.010299362, + 0.02136731, + 0.012169368, + 0.0050262664, + -0.037667487, + 0.0028375806, + -0.043531008, + 0.07092234, + -0.029633397, + 0.0034252724, + -0.03371975, + 0.002689036, + 0.07615999, + -0.047351267, + -0.029219117, + 0.0043876464, + -0.017166462, + -0.026522089, + 0.029852819, + 0.036388557, + 0.02790765, + 0.0012395928, + -0.033574115, + 0.026541134, + -0.015883164, + -0.017308207, + 0.0043208464, + -0.01781834, + -0.08576683, + -0.021266902, + -0.00091734336, + 0.063925914, + -0.0636338, + -0.019395242, + 0.04142762, + 0.051580306, + -0.009378915, + 0.0076578762, + -0.049971018, + -0.05210072, + 0.020126708, + -0.039226025, + 0.032834936, + 0.004295513, + -0.00822929, + -0.041445013, + -0.0053563626, + 0.066455126, + -0.014121869, + -0.00038340111, + 0.011891198, + -0.02433985, + 0.03911454, + -0.026543828, + 0.017506469, + 0.014610692, + 0.06652318, + 0.01890215, + -0.03491689, + 0.031371742, + -0.044803504, + -0.055975728, + 0.012669145, + 0.006600477, + 0.04271467, + 0.013318119, + -0.05349779, + 0.0036878218, + -0.0001651938, + 0.015618081, + 0.036369592, + -0.045075055, + 0.03905816, + -0.07850693, + 0.07685361, + -0.046722192, + -0.03938731, + -0.010492511, + 0.017311106, + 0.035254713, + -0.013005874, + -0.017511614, + 0.021798579, + -0.00913231, + -0.035806797, + -0.0063659386, + 0.019934557, + 0.024101818, + -0.034454327, + -0.007897603, + -0.002740732, + -0.034705732, + -0.0057592946, + 0.019262113, + 0.05265825, + -0.03382213, + -0.022950789, + -0.013037723, + -0.0764288, + 0.038185064, + -0.018474115, + 0.08566955, + -0.022391578, + 0.029010091, + 0.0014999794, + 0.011474489, + 0.07550279, + -0.0088601755, + -0.0067664813, + 0.027960664, + -0.022911478, + -0.06447774, + -0.03635964, + -0.05556697, + 0.0014916504, + 0.061901204, + -0.006489014, + -0.031035952, + 0.029084971, + 0.03652331, + 0.02115822, + -0.024768474, + -0.05207974, + -0.008811171, + -0.0291517, + -0.020186478, + -0.07146631, + -0.04208383, + 0.04857987, + 0.0074508637, + 0.037387297, + 0.061844684, + 0.0077880905, + 0.01571539, + 0.06102829, + 0.011565299, + 0.0043974966, + 0.028080147, + -0.0026064538, + -0.015231559, + -0.0027829441, + 0.010238836, + 0.0064328546, + -0.03777797, + -0.026618876, + 0.045300484, + -0.0023777906, + -0.031147419, + 0.001941467, + 0.028211078, + 0.035062306, + -0.043537844, + -0.0018198305, + -0.0062067653, + 0.0013700705, + -0.023436785, + 0.026487304, + -0.023156805, + -0.029925214, + -0.048819628, + -0.020895006, + -0.0053620506, + 0.020788668, + 0.0016424966, + 0.009597431, + -0.007447987, + 0.011617311, + 0.01665404, + 0.026866777, + 0.013419313, + 0.00021373077, + 0.013857725, + -0.005448599, + -0.024011314, + -0.046686616, + 0.0359406, + -0.0010894559, + -0.06973374, + -0.07715284, + -0.011489149, + 
-0.016353264, + 0.05362321, + 0.01999732, + 0.023591232, + 0.015858373, + 0.0106446, + 0.04530168, + 0.0035821204, + 0.0007134405, + 0.008175128, + 0.038299993, + 0.0054010325, + 0.057564262, + 0.018544776, + 0.0053211045, + -0.046358928, + -0.019733012, + 0.076029964, + 0.08506735, + -0.009986194, + -0.027884813, + 0.010542434, + 0.0060398704, + -0.0030184602, + -0.05998791, + -0.006252025, + -0.0019239573, + -0.010500256, + -0.008998424, + 0.031042974, + -0.035569057, + 0.03266593, + 0.009654758, + 0.025398506, + 0.039548393, + -0.015997441, + 0.0012819835, + -0.039446097, + -0.035862952, + -0.082573324, + 0.048624847, + 0.06937553, + -0.0054291803, + 0.025491295, + -0.03857474, + -0.02308041, + 0.08053192, + -0.034568477, + -0.0044807186, + -0.03503258, + -0.048932645, + 1.1737342e-05, + -0.011792595, + -0.032054264, + -0.00453626, + -0.008468506, + -0.0055969004, + -0.026221965, + 0.01031578, + -0.03324874, + 0.0109566515, + 0.034680765, + -0.03597828, + -0.03322748, + 0.03240576, + 0.024590159, + -0.040850475, + 0.017198646, + -0.031880114, + -0.0029554085, + -0.016767552, + -0.0015941852, + -0.017123714, + 0.035533957, + -0.010788068, + 0.030174825, + 0.010924076, + 0.027474629, + 0.023643604, + -0.013129948, + -0.027259605, + 0.005510377, + 0.017440986, + 0.008311619, + 0.032622393, + 0.012598541, + -0.008452944, + 0.012188304, + -0.0075518154, + 0.032866932, + 0.03646025, + -0.04298285, + -0.1059887, + -0.023007406, + -0.002635653, + 0.035034154, + 0.05254074, + -0.022326577, + -0.0014958372, + -0.028453777, + 0.026125064, + -0.03796821, + 0.008033808, + -0.030824648, + -0.005005962, + 0.0438012, + -0.02358864, + -0.04335626, + -0.035232823, + 0.03057689, + -0.0073437486, + -0.0404325, + -0.05135266, + 0.052123345, + -0.00016468669, + 0.02002462, + -0.015014162, + -0.03622243, + -0.03050481, + -0.040739246, + -0.024996106, + 0.054607674, + -0.016961228, + -0.06196773, + -0.0054934607, + -0.020940252, + 0.009475076, + 0.024586989, + 0.030742824, + -0.029876895, + 0.0011661805, + 0.049705602, + 0.01817788, + -0.011099843, + 0.012515207, + 0.012134478, + 0.06012862, + 0.06586978, + 0.02206432, + 0.012405332, + 0.011492619, + 0.057517283, + 0.039727986, + 0.036832094, + -0.0068368753, + -0.050639737, + 0.0027461697, + 0.030489529, + 0.019812578, + 0.013843842, + -0.042825714, + 0.028802438, + 0.011758442, + 0.043386873, + -0.08002957, + 0.06010537, + 0.020845708, + -0.059011314, + -0.025467385, + 0.019283999, + 0.02319924, + 0.10296513, + -0.0047983225, + -0.029733762, + -0.06991749, + 0.039923888, + 0.009794141, + 0.036195923, + 0.0149378395, + -0.0045961924, + 0.08263021, + -0.008851824, + -0.016882513, + -0.0039290953, + 0.033838544, + 0.07616792, + -0.039768293, + 0.0030416448, + -0.06292793, + 0.025954135, + 0.024035094, + -0.020181857, + -0.00037736268, + -0.0544439, + 0.03185422, + 0.05116394, + -0.020500429, + 0.025646817, + 0.021882568, + -0.032575775, + 0.030521028, + 0.039357774, + -0.04701352, + -0.007480726, + 0.024786005, + 0.06482045, + -0.03231383, + -0.009185509, + -0.029500628, + -0.042932667, + 0.0027423182, + 0.037025183, + -0.0021403548, + -0.0062750797, + 0.0015741963, + 0.0075664488, + 0.026836632, + -0.0068985997, + 0.051818896, + 0.021798473, + -0.014673459, + -0.049462285, + -0.025359796, + 0.005089651, + 0.010454076, + -0.0017442531, + 0.005919327, + 0.037392985, + 0.011022216, + 0.014484379, + 0.025708478, + -0.008212678, + 0.08412747, + -0.07219317, + -0.036572296, + -0.03318908, + -0.0037007534, + 0.01659926, + 0.0018811452, + 0.04749907, + -0.018900009, + 
-0.05883556, + 0.039992135, + 0.0024598013, + -0.06646788, + -0.017353285, + -0.036943384, + -0.019335784, + -0.025069907, + 0.026266735, + -0.07462318, + 0.025532207, + -0.006670783, + -0.049258057, + 0.03298218, + 0.016623227, + 0.022299461, + 0.021571873, + -0.072619714, + -0.03962455, + 0.014613417, + -0.020248458, + -0.05920888, + 0.031506635, + 0.059952386, + 0.017395217, + -0.0049050455, + 0.04887802, + -0.0065715476, + 0.020171778, + 0.03011787, + -0.044278126, + 0.013971917, + -0.0048314836, + 0.03344628, + -0.0767616, + -0.0061307205, + -0.008161809, + -0.009098235, + -0.029315813, + 0.045320068, + -0.007701528, + -0.018021924, + -0.030506555, + -0.03741862, + -0.020213155, + -0.0063777245, + 0.06945386, + 0.04283372, + 0.016477546, + 0.027384358, + -0.0026863571, + 0.007820002, + -0.0018470917, + 0.040006183, + 0.042037923, + 0.018319461, + -0.050153524, + 0.010664328, + 0.02503713, + -0.0007233028, + -0.012246717, + 0.033397615, + -0.023933277, + -0.048364405, + -0.041006297, + 0.06825752, + -0.028538162, + 0.016694458, + 0.0069958055, + 0.029652372, + 0.013887178, + -0.046311468, + 0.011172329, + 0.035175674, + -0.043903574, + 0.002936285, + 0.034429543, + 0.006820103, + -0.013296491, + -0.006742919, + 0.029530542, + 0.00532295, + 0.0075707044, + -0.008245243, + -0.08217108, + 0.010589537, + 0.029912904, + 0.041674282, + -0.016409904, + 0.009006446, + 0.052544534, + 0.013545871, + 0.00306798, + -0.067667685, + -0.028266698, + 0.031383086, + -0.0057115993, + -0.058313437, + -0.026002342, + 0.014227475, + -0.036897156, + 0.015020346, + -0.05232954, + 0.03962218, + -0.019057784, + -0.020456716, + -0.051977415, + 0.031089894, + -0.025652861, + 0.0014514852, + 0.033242825, + -0.019859595, + 0.008557296, + 0.057280354, + 0.044464763, + -0.05466, + 0.0396839, + -0.061720293, + -0.0012289534, + -0.031185132, + 0.00548277, + -0.004933768, + 0.013798229, + 0.0021489037, + 0.045024496, + 0.027551206, + -0.027432932, + 0.007928687, + 0.019000659, + 0.038767714, + -0.032183338, + 0.031476248, + 0.053522173, + 0.057496518, + -0.026903572, + 0.06892834, + 0.07015745, + 0.04140363, + -0.00942414, + -0.00061388145, + -0.040191073, + 0.02611062, + -0.05183095, + -0.0108404355, + -0.023469463, + -0.031083992, + 0.0026440022, + 0.0046938704, + -0.031017989, + 0.028630355, + 0.015287666, + 0.012703247, + -0.005691149, + -0.02598773, + -0.024182925, + 0.030279767, + -0.005073411, + 0.032127503, + -0.04519084, + 0.017076224, + 0.05640596, + 0.024112599, + -0.0333013, + -0.03903351, + -0.021338848, + 0.0010390321, + 0.034611, + 0.004346159, + -0.0064769904, + -0.0072676134, + 0.020723384, + -0.033305127, + -0.020461561, + 0.0050275815, + -0.044603597, + -0.013380884, + -0.036931954, + -0.026003534, + -0.07064688, + 0.011175793, + 0.0044292524, + -0.0024063522, + -0.023108391, + 0.008546763, + 0.054686714, + 0.004983771, + -0.04192459, + 0.048129994, + 0.028456993, + 0.013692521, + -0.004430813, + -0.003406782, + 0.031648476, + -0.021930605, + 0.006784842, + -0.026855038, + -0.026392555, + 0.008313964, + 0.021044634, + 0.010267574, + 0.012147755, + -0.02742087, + -0.043582316, + -0.083078235, + 0.01573647, + 0.025756931, + -0.06818067, + -0.016401079, + -0.0044566514, + -0.02378505, + 0.021864686, + 0.02386985, + -0.041395113, + 0.013274799, + 0.0063065225, + 0.006547624, + -0.026604403, + -0.043232836, + 0.051827814, + -0.06494862, + 0.0396398, + -0.069097236, + 0.018889207, + -0.067203484, + 0.01607326, + -0.020041527, + 0.034416907, + -0.053663958, + -0.017389456, + -0.0042673177, + -0.053327113, + 
-0.012564687, + 0.07531229, + 0.0427696, + -0.010124306, + -0.0027448875, + -0.0034454837, + -0.019242082, + 0.01708283, + -0.005840094, + 0.021710888, + -0.0076535884, + 0.04060072, + 0.11197486, + 0.04484882, + 0.011559398, + 0.008932262, + 0.061322574, + 0.021612102, + -0.045259267, + -0.011339255, + -0.05299153, + 0.0093771275 + ], + "index": 2, + "object": "embedding" + }, + { + "embedding": [ + 0.027245862, + 0.060283583, + -0.15871146, + -0.031568535, + 0.08966781, + -0.009877726, + -0.005061825, + 0.021904163, + -0.05223594, + -0.030656064, + -0.045109104, + 0.05240342, + 0.111219995, + 0.028164001, + -0.024039363, + -0.0130944615, + -0.037601292, + -0.020098876, + 0.007845649, + -0.01822089, + -0.032101102, + 0.014322339, + 0.039650172, + 0.015713558, + 0.013959974, + 0.037878696, + -0.04469285, + -0.0465454, + 0.0051279105, + 0.01630973, + 0.04561555, + -0.07390089, + 0.016852492, + -0.021088712, + -0.06328283, + -0.013791005, + 0.050055116, + 0.0036957439, + 0.060187742, + 0.059610564, + -0.017706284, + -0.022241557, + -0.05661737, + -0.02193874, + 9.48778e-05, + 0.013118881, + 0.03373546, + -0.011202453, + 0.07014778, + -0.051482487, + 0.03545195, + 0.00094783277, + -0.02942382, + 0.00038519106, + 0.07619621, + 0.024894293, + 0.036435377, + 0.017168151, + 0.056508567, + -0.009315149, + 0.10211646, + 0.09107672, + -0.03072802, + 0.06184492, + 0.023228725, + -0.026680725, + -0.04373859, + 0.071472734, + 0.016359106, + 0.045361094, + 0.04099657, + -0.05709707, + 0.016682878, + 0.061999902, + 0.0040781456, + 0.031207735, + -0.01815521, + 0.017081087, + -0.038311433, + 0.06551059, + 0.042621337, + -0.023254134, + 0.00324166, + 0.025500461, + 0.06363713, + 0.028368887, + -0.047420453, + -0.031893067, + -0.01832079, + 0.10243929, + 0.034108825, + 0.0026146523, + 0.035782505, + -0.01846613, + -0.06395596, + -0.0036888223, + -0.043183427, + 0.017307153, + -0.033251215, + -0.037922606, + -0.02813781, + -0.022724569, + -0.003101826, + -0.039399717, + 0.024256784, + 0.03649086, + 0.024154464, + -0.044671882, + 0.004651931, + 0.03141076, + -0.045471687, + 0.00470596, + -0.0032932786, + 0.01968961, + -0.048491728, + -0.04735094, + 0.015655091, + -0.017009573, + 0.012976821, + 0.05997737, + 0.037542593, + -0.051237483, + 0.016889507, + 0.0055180034, + 0.027581284, + 0.075740136, + -0.030488169, + -0.004377374, + -0.019294405, + -0.055036787, + 0.0096051805, + -0.018032536, + -0.019944519, + -0.02269011, + 0.044367604, + 0.08809307, + -0.019882299, + -0.094365284, + 0.040228304, + 0.020632531, + 0.017236752, + -0.017160296, + -0.004910616, + -0.017073063, + -0.0178934, + -0.022657098, + -0.001389279, + -0.03627766, + -0.020595334, + 0.02149062, + -0.022931164, + 0.038730804, + -0.020145698, + -0.021577856, + 0.0718258, + -0.03376272, + 0.011657426, + -0.005178226, + 0.04535083, + 0.01615894, + 0.032707777, + -0.018039498, + -0.018790582, + 0.02739878, + 0.004031926, + -0.03894811, + 0.04094701, + 0.036164746, + 0.04689552, + 0.05045284, + -0.07230247, + -0.001776263, + -0.04477206, + 0.025434542, + 0.08975286, + 0.019576134, + 0.04535626, + -0.049018703, + 0.047965, + -0.040172733, + 0.021348117, + -0.04445437, + 0.006687952, + 0.02179775, + 0.02404915, + 0.03876682, + -0.018946612, + -0.026794031, + -0.005406324, + -0.044365283, + -0.007350431, + 0.01732674, + -0.00943676, + -0.021791663, + -0.047802847, + 0.0070027253, + 0.029850952, + -0.03508603, + 0.04632801, + -0.025603946, + 0.008032826, + -0.027046453, + -0.04433862, + -0.01474196, + -0.019139003, + 0.047279418, + -0.0017983918, + 
-0.0010266311, + 0.0008772529, + 0.043189965, + 0.050935254, + 0.021701865, + 0.025868567, + 0.0070106974, + -0.040093336, + -0.003238879, + -0.010293299, + 0.010317621, + -0.023940518, + -0.016471367, + 0.017227875, + -0.015673608, + 0.011852957, + -0.047917172, + 0.016926808, + -0.04070471, + -0.07315424, + -0.0117236925, + -0.0026620778, + 0.024642462, + 0.0014607996, + -0.044809517, + 0.09402161, + -0.018066194, + 0.040263332, + 0.022643141, + 0.03896513, + 0.05954352, + -0.017299676, + 0.0072893444, + 0.016921865, + 0.0058542406, + -0.008214378, + 0.01744687, + -0.0685054, + -0.031103907, + 0.025145013, + -0.06425777, + -0.018737316, + 0.036973044, + 0.033628393, + 0.0058102794, + 0.0022098932, + 0.038919367, + 0.04726517, + -0.0058417385, + -0.002135642, + 0.017032234, + 0.028075736, + -0.026516486, + 0.028623953, + -0.008184112, + -0.013200166, + -0.04673543, + -0.019416578, + -0.076724775, + 0.006872661, + -0.010197241, + -0.003372622, + 0.0021620456, + 0.00240546, + 0.0035013973, + 0.043290343, + -0.04864605, + -0.009547462, + 0.03201086, + -0.005911921, + -0.0123690395, + -0.011560213, + 0.0027875686, + -0.018296137, + -0.0041300203, + -0.08999025, + -0.028549945, + -0.025506724, + -0.0007048058, + 0.04636368, + 0.015024821, + 0.0071439566, + 0.027114589, + 0.0072933384, + -0.008806719, + -0.01519739, + 0.0012542526, + -0.0017610046, + 0.027101524, + 0.0854385, + 0.017921269, + -0.04569333, + -0.022095298, + -0.0036186369, + 0.020641662, + 0.051357616, + 0.023811221, + 0.013467358, + -0.027534153, + -0.032872036, + 0.011422957, + 0.020111589, + 0.00066933193, + -0.021959255, + 0.0062451945, + 0.021817718, + 0.003450641, + -0.011268173, + 0.0019975253, + -0.005088231, + 0.04558833, + 0.07090172, + -0.027219305, + 0.012050814, + -0.03922491, + -0.059428718, + -0.020768164, + -0.0046120123, + 0.05145667, + -0.021452473, + 0.001263492, + -0.041401517, + -0.07144716, + 0.028021138, + 0.017785124, + 0.027505571, + 0.0042549605, + -0.039304886, + -0.051514883, + -0.004218487, + 0.021489624, + -0.00059305044, + 0.03607232, + 0.016684912, + -0.01774261, + 0.005931646, + -0.04204551, + -0.04362529, + 0.02855274, + -0.013241047, + -0.018193208, + -0.005617491, + -0.006943511, + -0.020308204, + 0.018649286, + 0.007975145, + 0.007177669, + 0.009523636, + -0.019732438, + 0.056202587, + 0.033373702, + 0.01409769, + -0.009485809, + 0.033760604, + -0.008198031, + -0.00681633, + -0.0037554954, + -0.03238141, + -0.0056827515, + 0.028672356, + 0.015055369, + 0.016145162, + -0.011672806, + 0.016120475, + -0.018956868, + -0.0048036706, + 0.02629785, + -0.024991067, + 0.031281672, + -0.0702558, + -0.003573209, + -0.04217928, + -0.0030341262, + -0.027616149, + 0.0057182107, + 0.0323835, + -0.008513545, + 0.047801852, + 0.009490673, + 0.020305088, + -0.06920696, + -0.0012978396, + 0.056136526, + 0.012414983, + 0.0025740871, + -0.04842826, + -0.07440041, + 0.04167829, + -0.033985693, + 0.047807522, + 0.015166004, + 0.009363624, + 0.01819693, + -0.026656805, + -0.06516735, + 0.007120078, + -0.022500241, + -0.010702533, + 0.03584595, + -0.031223014, + -0.03895432, + 0.0234847, + 0.03174296, + 0.026597798, + 0.044434477, + 0.04964613, + -0.05766173, + 0.015803417, + -0.00081371516, + 0.040700074, + 0.041978814, + -0.016586332, + 0.029647356, + 0.0036003343, + 0.042376608, + 0.008695962, + -0.008596939, + -0.011530272, + 0.034333903, + 0.015860746, + 0.018078186, + -0.018113146, + -0.037704233, + 0.047249004, + -0.02584009, + 0.005825563, + 0.000371342, + -0.031069594, + 0.0038704663, + -0.0064397594, + 
0.0067662997, + 0.039237246, + 0.01610454, + 0.053018425, + -0.017866885, + -0.033351976, + -0.04966936, + 0.02553021, + 0.096392356, + 0.006235646, + -0.0011623363, + -0.09150005, + 0.056395184, + 0.025470069, + 0.03975463, + 0.047834385, + -0.031531435, + 0.06536414, + -0.03136712, + -0.005700051, + 0.012526135, + 0.017888134, + 0.012697156, + 0.022255125, + 0.034288254, + -0.08876369, + -0.010626175, + -0.028193215, + 0.0030229834, + 0.013437896, + -0.045422014, + 0.04681177, + 0.030657688, + -0.03141879, + 0.030983318, + 0.00336144, + 0.021394482, + -0.018361505, + -0.031111937, + 0.03457415, + -0.0023526768, + 0.03803461, + 0.043445755, + -0.013572091, + -0.08171221, + -0.046155915, + -0.069421306, + -0.015525085, + 0.025588093, + -0.018922325, + 0.030250905, + -0.032884397, + 0.008061702, + 0.026341802, + -0.021932058, + 0.0134598175, + -0.008491402, + -0.03877356, + -0.0476232, + -0.0776146, + 0.037178673, + 0.06379859, + -0.023771383, + -0.0044903033, + 0.056668997, + -0.07009883, + -0.03152752, + 0.043444388, + 0.01206208, + 0.04602436, + -0.07172936, + -0.061790556, + 0.03829441, + -0.013659499, + -0.030399065, + -0.035164356, + 0.0317647, + 0.017092723, + -0.055914905, + 0.020872148, + -0.016242614, + -0.050757747, + 0.0023328536, + 0.04715397, + -0.01135217, + 0.011601415, + -0.02599819, + -0.039736405, + 0.018630927, + -0.041785266, + -0.033215553, + 0.041373458, + -0.012634345, + 0.048526336, + -0.013929099, + -0.030469704, + -0.015005477, + -0.024936618, + 0.005307157, + -0.00036820394, + 0.001962054, + 0.031552475, + 0.0018166394, + 0.05759657, + 0.0014612125, + 0.045063153, + -0.01830616, + 0.018843198, + -0.020797426, + -0.008716646, + 0.029580116, + -0.023307435, + -0.07548631, + 0.0071234074, + -0.048167568, + -0.0039012767, + -0.024599176, + 0.017739318, + -0.023021622, + -0.04997149, + -0.067146346, + 0.0076629273, + -0.009611252, + -0.028416289, + 0.04600209, + 0.022871956, + -0.025487065, + -0.0071445624, + 0.028350297, + -0.03804604, + 0.015516315, + 0.033764865, + 0.039653454, + 0.04477548, + -0.0622456, + -0.015426987, + 0.019288, + -0.0073813493, + -0.031079715, + 0.03758739, + 0.020391418, + -0.06970982, + -0.0649795, + 0.013703063, + -0.056728862, + -0.015340432, + 0.015757658, + 0.015466401, + 0.004555054, + -0.06372665, + -0.00501313, + 0.05966391, + -0.034424067, + -0.018809654, + 0.01602035, + -0.034418017, + -0.077762775, + -0.022856047, + -0.007983469, + 0.0006324841, + 0.017406244, + -0.052947056, + -0.051727176, + -0.0017075659, + 0.0047101146, + 0.05452821, + -0.046378218, + -0.019906662, + 0.08689091, + 0.038267314, + 0.046228018, + -0.024327576, + 0.0034851911, + 0.001068745, + 0.029938696, + -0.020577151, + -0.043334898, + 0.07126347, + -0.044205036, + 0.053321823, + -0.013972622, + -0.033100657, + -0.049140602, + -0.042451255, + -0.052555818, + 0.036991484, + 0.007727234, + 0.046934932, + -0.03681313, + -0.054982018, + -0.015578396, + 0.030656325, + 0.057343654, + -0.054728117, + 0.031549044, + -0.011055691, + -0.014745011, + -0.03597926, + 0.0027503108, + -0.019723143, + 0.018643366, + 0.029704876, + 0.04329162, + -0.00405516, + -0.047569558, + -0.0420094, + 0.033786584, + 0.03496848, + 0.0063383738, + 0.041854557, + 0.077770464, + 0.0080803335, + -0.0037750478, + 0.09271395, + 0.041000195, + 0.033774655, + -0.0078020873, + -0.0329384, + -0.016490592, + 0.04216569, + -0.045574486, + -0.027002726, + -0.04039204, + -0.0455005, + 0.006861543, + -0.012789972, + 0.018258702, + 0.01183113, + -0.030536951, + -0.012831484, + -0.04837929, + -0.045997955, 
+ -0.01881417, + 0.03721969, + -0.017666493, + 0.026500538, + -0.021292703, + 0.005287962, + 0.03912168, + 0.013433035, + 0.012103709, + 0.018988166, + -0.013906217, + 0.007650382, + 0.006032777, + -0.001299358, + -0.038683444, + -0.009180721, + 0.0144397635, + 0.038731154, + -0.035990484, + 0.00036745195, + -0.059590884, + 0.00040038596, + -0.014142658, + -0.014341654, + -0.010042413, + -0.032898992, + 0.061229717, + -0.016390923, + 0.0101258755, + 0.0070963274, + 0.06077856, + -0.010359901, + 0.036488257, + 0.009701303, + 0.019478898, + -0.023020407, + -0.022665584, + 0.0019758136, + -0.012811091, + -0.030994447, + -0.020028442, + -0.023469936, + 0.04515979, + 0.018709365, + 0.11431244, + -0.031670246, + 0.019375036, + 0.013917027, + -0.022900162, + -0.028190011, + 0.06998063, + 0.011137804, + -0.01323254, + -0.042150043, + 0.012698348, + -0.030653633, + -0.009219284, + 0.013932575, + -0.070930734, + -0.009891334, + -0.0034357598, + -0.0075193173, + -0.026391804, + -0.028414827, + 0.03698509, + 0.005169126, + -0.0052795867, + -0.051408794, + -0.010734686, + -0.006937469, + -0.022320686, + -0.016538981, + 0.010083156, + 0.0012961837, + -0.04591768, + 0.054475185, + -0.009425144, + 0.008758125, + 0.04664199, + 0.03343574, + -0.019808, + 0.021894857, + -0.01854046, + -0.02284305, + 0.0168231, + -0.0052546444, + 0.03224328, + -0.024904018, + 0.07087449, + 0.1269788, + -0.017275726, + 0.05269279, + -0.019833203, + 0.0231947, + -0.012339875, + -0.05842646, + 0.0072436375, + -0.051073585, + 0.0094848145 + ], + "index": 3, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 162, + "total_tokens": 162 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/37706c1729ba.json b/tests/integration/recordings/responses/37706c1729ba.json index 256e0c37e..74caaadf1 100644 --- a/tests/integration/recordings/responses/37706c1729ba.json +++ b/tests/integration/recordings/responses/37706c1729ba.json @@ -38,7 +38,7 @@ } } ], - "created": 1759245080, + "created": 1759282470, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/390f0c7dac96.json b/tests/integration/recordings/responses/390f0c7dac96.json deleted file mode 100644 index e8c9528fb..000000000 --- a/tests/integration/recordings/responses/390f0c7dac96.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-11T15:51:18.170868Z", - "done": true, - "done_reason": "stop", - "total_duration": 5240614083, - "load_duration": 9823416, - "prompt_eval_count": 21, - "prompt_eval_duration": 21000000, - "eval_count": 310, - "eval_duration": 5209000000, - "response": "This is the start of a test. 
I'll provide some sample data and you can try to generate metrics based on it.\n\n**Data:**\n\nLet's say we have a dataset of user interactions with an e-commerce website. The data includes:\n\n| User ID | Product Name | Purchase Date | Quantity | Price |\n| --- | --- | --- | --- | --- |\n| 1 | iPhone 13 | 2022-01-01 | 2 | 999.99 |\n| 1 | MacBook Air | 2022-01-05 | 1 | 1299.99 |\n| 2 | Samsung TV | 2022-01-10 | 3 | 899.99 |\n| 3 | iPhone 13 | 2022-01-15 | 1 | 999.99 |\n| 4 | MacBook Pro | 2022-01-20 | 2 | 1799.99 |\n\n**Task:**\n\nYour task is to generate the following metrics based on this data:\n\n1. Average order value (AOV)\n2. Conversion rate\n3. Average revenue per user (ARPU)\n4. Customer lifetime value (CLV)\n\nPlease provide your answers in a format like this:\n\n| Metric | Value |\n| --- | --- |\n| AOV | 1234.56 |\n| Conversion Rate | 0.25 |\n| ARPU | 1000.00 |\n| CLV | 5000.00 |\n\nGo ahead and generate the metrics!", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/3b60c09d6c4f.json b/tests/integration/recordings/responses/3b60c09d6c4f.json new file mode 100644 index 000000000..573daa802 --- /dev/null +++ b/tests/integration/recordings/responses/3b60c09d6c4f.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "The secret string is foobazbar." + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.00044567845, + 0.069345646, + -0.13331954, + -0.046871964, + 0.08016425, + -0.048083987, + -0.019010393, + 0.015145315, + -0.046878867, + -0.05115706, + -0.11474304, + 0.058239155, + 0.016648395, + 0.011023492, + 0.041939907, + -0.029991476, + -9.543025e-05, + -0.02533831, + -0.02011866, + -0.07322108, + 0.017030168, + -0.00957343, + 0.004485929, + 0.017447446, + 0.1246118, + 0.0117449965, + 0.0014033606, + 0.016348116, + -0.0005036347, + -0.040095236, + 0.015161008, + -0.0034678434, + -0.025513498, + 0.018403651, + -0.046444066, + -0.0633152, + 0.017913556, + 0.027162347, + -0.027503235, + 0.07005407, + -0.06677951, + 0.067936614, + -0.009670534, + 0.03929378, + 0.026953742, + -0.04413318, + 0.012423691, + 0.053801637, + 0.068956025, + -0.07052555, + 0.072077766, + -0.026170403, + 0.0569044, + -0.014713597, + 0.027845478, + 0.004202079, + 0.013470566, + -0.048575625, + 0.026492853, + 0.01398613, + 0.061292946, + 0.018669717, + -0.03883197, + 0.08187032, + 0.027836354, + 0.007642394, + -0.056150433, + 0.023952084, + 0.031071052, + -0.049114376, + 0.058882445, + -0.00040445005, + -0.02008241, + 0.012982363, + -0.061310835, + 0.008937138, + -0.020913182, + -0.0092431, + -0.031858914, + 0.014872756, + 0.029764224, + -0.016896453, + 0.021685613, + 0.018258028, + -0.04633906, + -0.03561103, + -0.033857256, + 0.019963097, + -0.03752244, + 0.015296732, + -0.017445896, + -0.014324619, + 0.004804526, + 0.04106732, + -0.017421542, + 0.0192038, + 0.027671007, + 0.044899814, + -0.04936399, + -0.030076561, + 0.016601052, + -0.013544007, + 0.042761896, + 0.0024784307, + -0.0022394105, + 0.013565438, + 0.0022860803, + -0.00041760976, + -0.05886792, + 0.0074303076, + -0.0015840015, + 0.05203811, + -0.013102137, + -0.09152751, + 0.025666736, + 
-0.0022051502, + 0.022787694, + -0.02524802, + -0.00011112814, + -0.0022206625, + -0.021147829, + -0.02161167, + 0.01456756, + 0.025838867, + -0.01404628, + 0.026200539, + -0.014191877, + 0.021828128, + 0.019994682, + -0.07021417, + -0.009830949, + -0.01094356, + 0.011583981, + -0.0037562435, + 0.032894533, + 0.048460174, + -0.017713327, + 0.0038000469, + 0.069233336, + -0.02220729, + 0.012367555, + 0.010958855, + 0.017700545, + -0.06432872, + 0.014903545, + -0.07342504, + 0.029049437, + 0.01858068, + -0.019002236, + -0.030976567, + 0.001063091, + 0.009665964, + 0.017194226, + 0.014693427, + -0.004587786, + -0.02747058, + 0.061187223, + 0.032178245, + 0.009072266, + 0.046665266, + 0.036214747, + 0.028900135, + -0.00039593378, + 0.002205184, + -0.054302886, + -0.038410567, + 0.01953658, + 0.07283172, + 0.0063177072, + 0.048450936, + -0.062249575, + 0.011464932, + 0.009836349, + -0.019204034, + 0.0212673, + 0.0026400527, + -0.031265385, + 0.005496048, + 0.009981116, + -0.02005659, + 0.035396017, + -0.055278853, + 0.044190887, + 0.023812689, + -0.0602695, + 0.019462213, + -0.01969013, + -0.028041134, + 0.02364917, + -0.049788468, + 0.0022309152, + -0.040284824, + -0.059724264, + -0.03366438, + -0.028473698, + -0.018445726, + 0.02930147, + 0.028754137, + 0.033635426, + 0.017532766, + -0.08573839, + 0.04823697, + -0.027376462, + 0.0056161224, + -0.012013627, + -0.021365276, + 0.008281257, + -0.028078597, + 0.024465317, + 0.024162576, + 0.075117595, + -0.06746106, + 0.0036551915, + -0.01740995, + 0.006771356, + -0.021181645, + -0.010371318, + -0.015649507, + -0.028625006, + 0.03872479, + 0.06485805, + 0.04116872, + 0.014413853, + -0.023209086, + 0.024703778, + 0.008546008, + -0.055185292, + -0.0003334275, + -0.03359408, + 0.006813681, + 0.026214652, + -0.094747946, + 0.05505837, + 0.06588719, + -0.021185499, + -0.008195226, + 0.024911653, + 0.06094513, + -0.011626769, + 0.0052414685, + 0.00221315, + 0.0049781743, + -0.006753542, + 0.017345196, + -0.032445163, + 0.04730397, + -0.030807534, + -0.011132825, + 0.019257821, + 0.037375852, + -0.01791027, + 0.013328558, + 0.0039301207, + 0.02116138, + 0.022959339, + -0.034923322, + 0.020886097, + -0.03162536, + 0.01642531, + -0.071851775, + 0.0043929643, + -0.038616575, + 0.013561031, + -0.046020526, + -0.009411261, + -0.01872071, + -0.004853035, + 0.017835563, + 0.016219897, + -0.040965024, + -0.015721563, + -0.011120184, + 0.002712119, + -0.013525761, + -0.017541371, + 0.002172893, + 0.047437634, + -0.00055855716, + -0.019012688, + -0.0034372362, + -0.06898951, + -0.00070805446, + -0.066043876, + 0.013205724, + -0.040814314, + 0.05816519, + 0.028029984, + -0.013227342, + 0.0012570657, + 0.0041219597, + 0.053272642, + 0.005242944, + -0.023647735, + 0.037811704, + 0.011506217, + 0.019518841, + 0.026147118, + 0.015235484, + 0.010721468, + -0.06350039, + 0.03209373, + 0.034801636, + 0.0081500225, + 0.005969703, + -0.017227497, + -0.025534213, + 0.017176751, + 0.039256673, + 0.046966672, + 0.03472027, + -0.047879733, + 0.03222837, + 0.03380229, + 0.029047774, + -0.044715878, + 0.050964445, + -0.008719146, + 0.024849666, + 0.06419251, + -0.030985096, + -0.018823322, + -0.054562908, + -0.00907499, + -0.10115823, + -0.024997335, + 0.01242978, + -0.0019470031, + 0.0333229, + -0.029330114, + -0.041030563, + 0.023396686, + 0.05379854, + -0.027988946, + -0.021597246, + -0.040569063, + 0.04048141, + 0.005340183, + 0.019063592, + -0.025319468, + -0.003563014, + -0.0026412164, + -0.018177321, + 0.03233157, + -0.067418195, + 0.0076498054, + 0.038282733, + 
-0.03286021, + -0.032854397, + 0.046934273, + 0.04355527, + -0.07515824, + 0.013815288, + -0.04784709, + 0.026895981, + 0.0025065525, + 0.025239244, + 0.054204963, + -0.014532232, + 0.028296318, + -0.010739294, + 0.051052067, + -0.026637534, + 0.0068342197, + -0.026805444, + 0.02265711, + -0.007651249, + 0.030557599, + -0.03413214, + -0.038503505, + 0.017946247, + -0.031123659, + -0.022322055, + 0.02973932, + 0.011667091, + -0.014459768, + -0.028301675, + -0.11210148, + -0.00873513, + -0.017461887, + 0.018714411, + 0.02778843, + -0.03661049, + 0.033506807, + -0.011684556, + 0.01726771, + -0.003502183, + -0.0037348305, + -0.023243207, + 0.05685141, + 0.04693209, + -0.025070677, + -0.00013908459, + -0.027548794, + 0.018317811, + -0.0178067, + 0.0014910959, + 0.01803822, + 0.01608141, + 0.007222165, + -0.0014852714, + -0.046118837, + -0.0026458004, + 0.039712854, + -0.002699, + -0.04608312, + 0.056430176, + 0.005960536, + -0.04096914, + 0.07490523, + -0.040113874, + 0.050887205, + -0.0050432947, + 0.025429089, + -0.040005684, + -0.016144099, + -0.027699653, + 0.008637651, + -0.01148726, + -0.011380815, + 0.007922618, + 0.07924035, + 0.063685514, + -0.0018839106, + -0.012124223, + 0.0073183966, + 0.00021943168, + -0.016844638, + 0.043696962, + 0.0029683067, + -0.040563498, + 0.03907888, + 0.037264947, + 0.0111134555, + 0.05346586, + -0.025725322, + 0.023384957, + -0.060350742, + -0.026976733, + 0.012131329, + 0.03989188, + 0.02435085, + -0.0075752987, + -0.0114409635, + 0.035790615, + 0.020276839, + 0.07685958, + 0.046703145, + -0.020972438, + -0.03259271, + 0.06400826, + -0.00498698, + -0.024871409, + 0.014828645, + 0.0130927, + 0.106245086, + -0.007118865, + 0.012881113, + 0.011313499, + 0.0839651, + 0.0125661325, + -0.0066993455, + -0.022454198, + -0.06478769, + 0.020374268, + 0.015577235, + -0.032526292, + 0.020350832, + -0.0571311, + 0.08554014, + 0.08232226, + -0.037315074, + 0.0021203265, + 0.024621665, + -0.041138764, + 0.0257467, + 0.029454008, + 0.01576975, + 0.030322494, + -0.027369676, + 0.035611905, + -0.033540208, + 0.03968557, + -0.057308182, + -0.059743047, + -0.023096878, + 0.040560856, + 0.014436853, + -0.025654038, + -0.018847847, + 0.025198145, + 0.030089647, + 0.024180522, + 0.0022778937, + -0.002554793, + 0.0022749486, + -0.08901101, + -0.06115288, + -0.01974829, + 0.026249625, + -0.0053902855, + 0.0070387293, + 0.02137391, + 0.0016356307, + 0.034444757, + 0.037089553, + -0.012963089, + 0.015482281, + -0.016791286, + -0.066437095, + -0.020030353, + -0.036646403, + 0.0022244542, + -0.028270856, + -0.0035234697, + 0.043064065, + -0.007920013, + 0.06887318, + 0.033386547, + -0.024132386, + 0.010797932, + -0.008047283, + 0.024117367, + 0.014206666, + -0.04957293, + -0.06584216, + 0.07456989, + 0.023377368, + -0.009300324, + -0.011824271, + -0.07421093, + 0.025775433, + -0.03486574, + -0.011464092, + -0.033658788, + 0.04973876, + -0.008150324, + 0.016183274, + 0.026232768, + -0.046371486, + 0.05480489, + 0.012598278, + 0.033995587, + -0.026970293, + -0.02781425, + 0.008035459, + -0.009073307, + -0.0346637, + -0.016842574, + -0.016181363, + -0.01383546, + 0.0642562, + -0.050719734, + -0.055135835, + -0.006392721, + 0.004836332, + -0.02701654, + -0.0027673533, + 0.020192543, + -0.0038055407, + 0.016163835, + -0.0107361125, + 0.01661987, + 0.009653905, + 0.0023535355, + -0.0033649358, + -0.053976573, + 0.018550616, + -0.034805, + 0.029848143, + 0.03626025, + -0.07495047, + -0.001908639, + -0.07656478, + 0.038458325, + 0.029302891, + 0.023092957, + -0.007622042, + -0.030261463, + 
-0.021329772, + -0.018646786, + 0.0127468, + -0.0658906, + -0.0026415756, + -0.02147435, + -0.021851867, + 0.036363255, + -0.047830794, + -0.07678409, + -0.019886537, + -0.06597324, + -0.04127708, + 0.04287775, + 0.024867415, + 0.031287063, + -0.014819534, + 0.00026204466, + -0.015248521, + 0.0058353236, + -0.024796542, + -0.054158095, + 0.032939717, + 0.0361686, + 0.047894675, + 0.0028992337, + -0.030339025, + 0.03422538, + 0.033026263, + 0.03143931, + -0.011571698, + 0.009420109, + 0.029710123, + 0.03437753, + -0.008656629, + -0.003830146, + 0.03320896, + -0.050311238, + 0.0586845, + 0.023397285, + -0.045850404, + -0.010823152, + 0.023126738, + -0.05035062, + -0.0030130981, + -0.0052116127, + 0.053729337, + -0.036006823, + -0.052962758, + -0.008728322, + -0.01685641, + 0.036570363, + -0.03503138, + -0.0058037033, + -0.018182477, + -0.036445614, + -0.05576862, + 0.045270767, + -0.050004005, + 0.046993006, + -0.06549657, + 0.015647849, + 0.047161687, + -0.003219364, + -0.0043631354, + 0.032075495, + -0.0034678625, + 0.07055552, + 0.036095902, + -0.009122484, + 0.036022466, + 0.006809808, + 0.040848542, + 0.058361802, + -0.0054787197, + 0.0046539647, + 0.01463279, + -0.034826387, + 0.028488237, + -0.06910212, + -0.04828465, + -0.058208026, + 0.043390226, + -0.031781167, + -0.016992405, + -0.03197743, + 0.05476584, + 0.02947553, + 0.044686142, + -0.043358956, + -0.00148739, + 0.003283796, + 0.004783566, + -0.0059531527, + 0.048087712, + -0.04270814, + 0.051301256, + 0.034262523, + 0.055976618, + 0.042672966, + -0.020190198, + -0.043155447, + -0.0010662689, + 0.030956378, + -0.061135452, + -0.022980267, + 0.021279445, + 0.00079709163, + 0.016252836, + -0.0319085, + -0.03133885, + -0.03715316, + -0.014255662, + -0.03807531, + -0.013276923, + -0.075007856, + 0.029038494, + 0.003576076, + -0.04630256, + -0.013997682, + -0.06467764, + 0.07094117, + -0.023424728, + 0.008367736, + -0.011615238, + 0.019250317, + -0.062135782, + -0.02721775, + 0.009017732, + -0.01770822, + 0.0019154089, + -0.022779467, + 0.001992755, + 0.0523557, + 0.0039214473, + 0.02655032, + -0.0090086395, + 0.048243005, + -0.007176262, + -0.01898235, + -0.0053927833, + -0.0036218057, + 0.044131264, + -0.032330353, + -0.011098804, + -0.0014564599, + 0.0043925233, + -0.04351347, + 0.04603144, + -0.047746886, + 0.047553774, + -0.01860305, + 0.005971783, + -0.040747114, + 0.014575995, + -0.021958629, + 0.01937992, + 0.0009213148, + -0.05576995, + 0.051647134, + 0.014199863, + -0.026313303, + 0.020335903, + 0.041635584, + -0.022310706, + -0.01472034, + 0.019536275, + -0.0036119658, + -0.05164503, + 0.034833908, + 0.0007355733, + -0.016247703, + 0.050653964, + -0.057264917, + -0.027475258, + 0.045744468, + 0.037262745, + 0.020553257, + -0.010156378, + 0.060023002, + 0.130969, + 0.0118143745, + 0.008351982, + -0.037791353, + 0.0017138623, + 0.032201435, + -0.037822705, + -0.04097315, + -0.0012332207, + 0.008696999 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 9, + "total_tokens": 9 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/3c3f13cb7794.json b/tests/integration/recordings/responses/3c3f13cb7794.json deleted file mode 100644 index 117fbcceb..000000000 --- a/tests/integration/recordings/responses/3c3f13cb7794.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": 
"llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the name of the Sun in latin?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.136699Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.177622Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Latin", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.218104Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " word", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.258837Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " for", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.299715Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " \"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.341602Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Sun", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.385504Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.429427Z", - 
"done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.473547Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Sol", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.516327Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:18.559332Z", - "done": true, - "done_reason": "stop", - "total_duration": 628034000, - "load_duration": 116384417, - "prompt_eval_count": 26, - "prompt_eval_duration": 87798792, - "eval_count": 11, - "eval_duration": 423189583, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/40f524d1934a.json b/tests/integration/recordings/responses/40f524d1934a.json deleted file mode 100644 index 1c073c5ea..000000000 --- a/tests/integration/recordings/responses/40f524d1934a.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. 
San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.314693Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.362989Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "get", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.408403Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.455832Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(location", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.50384Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.552257Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.599938Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.645807Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.694632Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " CA", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.743454Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.790525Z", - "done": true, - "done_reason": "stop", - "total_duration": 687242541, - "load_duration": 131028916, - "prompt_eval_count": 324, - "prompt_eval_duration": 76000000, - "eval_count": 11, - "eval_duration": 479000000, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/44fb9cf5875f.json b/tests/integration/recordings/responses/44fb9cf5875f.json deleted file mode 100644 index 17c538862..000000000 --- a/tests/integration/recordings/responses/44fb9cf5875f.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest trace 1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:41:49.581065Z", - "done": true, - "done_reason": "stop", - "total_duration": 2391571708, - "load_duration": 182022958, - "prompt_eval_count": 20, - "prompt_eval_duration": 74456583, - "eval_count": 51, - "eval_duration": 2134471458, - "response": "It seems like you're trying to test the system, but I'm not sure what specific functionality or feature you'd like to test. Could you please provide more context or clarify what you're looking for? 
I'll do my best to assist you!", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/4597743bcd2a.json b/tests/integration/recordings/responses/4597743bcd2a.json deleted file mode 100644 index 868d27a0e..000000000 --- a/tests/integration/recordings/responses/4597743bcd2a.json +++ /dev/null @@ -1,185 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\nReturns the boiling point of a liquid in Celsius or Fahrenheit.\n\n:param liquid_name: The name of the liquid\n:param celsius: Whether to return the boiling point in Celsius\n:return: The boiling point of the liquid in Celcius or Fahrenheit\n\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. 
Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.476678Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[g", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.520346Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "reet", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.563375Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_every", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.606256Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "one", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.649215Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(url", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.692049Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.734316Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "world", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.776615Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": 
null, - "response": "\")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:17.819266Z", - "done": true, - "done_reason": "stop", - "total_duration": 5629478417, - "load_duration": 4092162625, - "prompt_eval_count": 448, - "prompt_eval_duration": 1191158583, - "eval_count": 9, - "eval_duration": 343915792, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/47004e2babf0.json b/tests/integration/recordings/responses/47004e2babf0.json new file mode 100644 index 000000000..7c491abbd --- /dev/null +++ b/tests/integration/recordings/responses/47004e2babf0.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "Python is a high-level programming language with code readability and fewer lines than C++ or Java" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.011488368, + 0.08907293, + -0.13142161, + -0.07895268, + 0.066022865, + 0.026360855, + -0.043541305, + 0.00094424584, + -0.024370281, + -0.06148249, + -0.0037689947, + 0.02773672, + 0.047909178, + -0.02939864, + 0.011469905, + -0.08921797, + 0.020931536, + -0.050551064, + 0.0090582725, + 0.058097444, + -0.021488983, + -0.04544651, + 0.0076826564, + -0.029468112, + 0.07073694, + 0.0072513763, + -0.020081414, + -0.038918976, + -0.012795414, + 0.020122375, + -0.028875042, + -0.021430979, + 0.019585375, + -0.032045633, + -0.052031405, + -0.051445574, + 0.058973435, + 0.010949792, + 0.05854762, + 0.00939292, + -0.026500102, + 0.007997425, + 0.027984431, + -0.033203643, + 0.0765589, + -0.047847986, + 0.031280704, + -0.04031829, + -0.01630044, + -0.035522394, + -0.018725617, + -0.0643683, + -0.048050657, + -0.00145174, + 0.08530237, + 0.046948127, + 0.0035006057, + 0.026577089, + 0.030813558, + -0.0314474, + 0.0914591, + 0.07347516, + -0.068352565, + 0.06653788, + 0.04145198, + 2.2763175e-05, + -0.032795746, + 0.033711713, + -0.011662007, + -0.02500982, + 0.014806517, + -0.08404245, + 0.034074288, + -0.02131799, + -0.04973383, + -0.019168304, + -0.01738479, + -0.03425713, + 0.011496745, + 0.049627766, + -0.004454383, + -0.007553486, + -0.008571264, + 0.0481393, + 0.048771415, + -0.049057007, + -0.04052862, + 0.008660308, + -0.023085842, + 0.05831716, + -0.058200188, + -0.0007301837, + 0.031119596, + -0.001510113, + -0.06288094, + 0.02649031, + -0.014243082, + 0.013741406, + 0.029891115, + -0.035321835, + -0.0007874549, + -0.017929547, + 0.040374395, + -0.05022418, + 0.047420263, + 0.04879514, + 0.022985416, + -0.036088556, + -0.056271147, + -0.019736229, + 0.010743018, + 0.04579346, + -0.04893372, + -0.03254895, + -0.047786195, + 0.020005278, + 0.09352314, + -0.032638513, + 0.05403496, + 0.058746118, + 0.013902004, + -0.014856816, + 0.046702012, + 0.062844306, + 0.024965078, + 0.018879883, + -0.059720308, + 0.06714566, + -0.004540917, + -0.05697842, + 0.028589077, + 0.010315179, + -0.04169755, + -0.0070149526, + -0.029461423, + 0.07288989, + -0.061704572, + -0.025856813, + 0.06512719, + 0.0066599897, + 0.03698303, + 0.021579178, + 
-0.012590982, + -0.0119007975, + 0.03978347, + -0.02246038, + 0.015831197, + 0.032543052, + 0.011093418, + 0.023233669, + 0.034819156, + 0.041866884, + 0.0020055538, + 0.014074135, + -0.019981578, + -0.008057632, + 0.034222472, + 0.0023065216, + 0.04555034, + 0.01121874, + 0.0654458, + 0.03134916, + -0.055534475, + 0.03950526, + -0.021282282, + -0.02630521, + 0.006853609, + -0.008049126, + -0.03182186, + 0.0004068945, + -0.043355547, + -0.04058918, + 0.008414404, + 0.0021767297, + 0.0066186627, + -0.019762259, + 0.014519637, + -0.039688654, + 0.045692563, + -0.010994483, + -0.008208485, + -0.043101825, + 0.04670997, + 0.043561783, + -0.046127435, + 0.01632397, + 0.016273865, + -0.045867354, + -0.005587781, + -0.019087313, + -0.01733775, + 0.032173995, + -0.026338268, + -0.051710702, + -0.016714055, + -0.014880144, + 0.0101565225, + 0.005058725, + 0.035922512, + -0.06759283, + -0.038288597, + -0.036956448, + -0.054448202, + 0.015715994, + -0.043900188, + 0.033019233, + -0.017369132, + 0.008349448, + -0.042008255, + 0.010484949, + 0.060232487, + 0.0044189435, + -0.025377398, + 0.048769046, + 0.0037088217, + -0.04514013, + -0.02408241, + -0.0057313573, + -0.0054432275, + 0.021014731, + 0.058329135, + -0.029602995, + 0.0038945777, + -0.0059355316, + 0.019913401, + 0.016605137, + -0.0575594, + 0.014817167, + -0.036886048, + 0.01452465, + -0.0056891516, + -0.038757816, + 0.034209594, + 0.014828261, + 0.010590116, + 0.04560492, + 0.03606981, + 0.046451095, + -0.0022792094, + -0.015315108, + 0.002956709, + 0.009974895, + -0.014766702, + 0.029623332, + -0.041294064, + 0.022859031, + -0.0059115966, + -0.03724629, + -0.00086585025, + 0.036032964, + -0.017468352, + -0.0182249, + 0.012723173, + 0.052306913, + 0.0363147, + 0.029758507, + 0.056407142, + 0.01234964, + 0.0135322865, + -0.0076179984, + 0.047202323, + -0.050033085, + -0.028000338, + -0.025103243, + -0.019605383, + 0.023990436, + -0.0075666127, + 0.009893213, + 0.0042337226, + -0.034943476, + 0.019118771, + 0.025516555, + 0.016372621, + -0.045386784, + -0.0076442338, + -0.016714053, + 0.018130064, + -0.05281019, + 0.0061577633, + 0.007972123, + 0.039240886, + -0.031219257, + -0.043458417, + 0.023760727, + -0.0019233959, + 0.034131095, + 0.037140265, + 0.001257368, + 0.008872333, + -0.017802484, + 0.06634031, + -0.018231707, + -0.040559564, + -0.03670049, + -0.009176452, + 0.040855963, + 0.083597414, + 0.015891276, + 0.019406065, + -0.028079053, + -0.02434008, + 0.049721453, + 0.08111963, + 0.034266386, + 0.027706612, + -0.024156323, + 0.034014143, + -0.004383591, + -0.019008825, + -0.008942543, + -0.04909622, + 0.04501953, + -0.045705624, + 0.072272286, + -0.07661043, + 0.022335226, + 0.015420332, + 0.029117696, + 0.042505234, + -0.022585507, + 0.0039081913, + -0.086267754, + 0.03733843, + -0.031266082, + -0.0068033175, + 0.04029885, + -0.017780999, + 0.022028906, + -0.027171975, + -0.050008755, + 0.008298878, + 0.011933541, + 0.0152934175, + -0.015793603, + -0.0673487, + -0.0064172964, + 0.037676953, + -0.018025218, + 0.018773079, + 0.0051527745, + 0.033772994, + -0.034934085, + 0.014310966, + -0.04726107, + 0.004405532, + 4.2734075e-05, + 0.026572658, + -0.044114474, + 0.031074164, + 0.03071906, + -0.009484853, + 0.03711684, + -0.025813565, + -0.024846341, + -0.011359158, + -0.041466694, + 0.01914002, + 0.0012177938, + -0.0054687117, + 0.0027515932, + 0.04025552, + -0.0069444985, + 0.030474605, + -0.057275087, + 0.004736491, + 0.002789965, + 0.018351864, + -0.011660434, + -0.015821503, + -0.011462616, + -0.033419356, + -0.05104818, + 
-0.0030111782, + 0.009709, + 0.010288827, + -0.022103397, + -0.0642, + -0.029997412, + -0.016013661, + -0.002303385, + 0.026114397, + -0.05361758, + -0.04575494, + 0.002697649, + 0.02567258, + -0.061158918, + -0.012497801, + -0.017992899, + 0.019593071, + 0.025052099, + 0.03286399, + -0.042965606, + -0.035508, + 0.032446146, + 0.0371789, + -0.027910959, + 0.040623948, + 0.017507747, + -0.053210605, + -0.00633099, + -0.04437149, + -0.069885515, + 0.020052157, + -0.008017359, + -0.027566357, + 0.008547149, + 0.004847182, + -0.028501885, + 0.015757173, + -0.012012285, + -0.005947874, + 0.0176843, + 0.019584997, + -0.017860798, + -0.012815542, + 0.05130764, + 0.020271033, + 0.03307423, + -0.049778644, + 0.008983508, + 0.026140546, + 0.06028017, + -0.017653985, + 0.011345359, + 0.018171743, + 0.020853298, + 0.0264798, + 0.062104598, + 0.010310946, + -0.06562607, + 0.01043746, + 0.034825344, + 0.021020371, + 0.027116027, + -0.0037368021, + 0.0042153355, + 0.03373333, + 0.008112555, + -0.02199968, + 0.057989873, + 0.026363613, + -0.019325271, + -0.06458278, + 0.011872044, + 0.024819711, + 0.06554175, + 0.07610625, + -0.017614668, + -0.08674962, + 0.0088432925, + -0.005442114, + 0.006102016, + 0.006328422, + 0.0060164, + 0.037999444, + -0.0014527381, + -0.01356921, + 0.016244326, + -0.01457221, + 0.056518734, + -0.0011039514, + 0.014004817, + -0.053100053, + 0.028817357, + 0.0064820037, + 0.0012086668, + -0.009552054, + -0.004504296, + -0.007035088, + 0.0556937, + -0.01315211, + 0.029669777, + 0.023995124, + -0.013237353, + -0.015704637, + -0.035238434, + -0.0037444944, + 0.028946487, + 0.023387091, + 0.016726805, + -0.013977982, + -0.03047428, + -0.04594697, + -0.00228121, + 0.0007855954, + 0.02124062, + -0.008536624, + 0.0048718117, + -0.014064172, + -0.036988426, + 0.027667416, + 0.0422569, + 0.04806283, + 0.01843529, + -0.025697526, + -0.0524962, + -0.020671658, + 0.07923146, + 0.08527786, + 0.028903358, + 0.026692472, + 0.01747058, + -0.015024007, + 0.0016035172, + 0.057610784, + -0.031230353, + 0.06121582, + -0.047109988, + -0.03725349, + 0.01860743, + 0.019578215, + -0.0025576772, + -0.0060827793, + 0.054300606, + 0.057380572, + -0.035506696, + 0.032013237, + -0.022982, + -0.08711582, + 0.026141228, + 0.021207755, + -0.028961299, + 0.00062547013, + -0.024462542, + -0.043661416, + 0.035253577, + 0.009077339, + -0.014111102, + 0.0058460566, + -0.019649502, + 0.044755884, + -0.0044299113, + -0.037719697, + -0.012573531, + -0.057711683, + -0.047507294, + -0.0704702, + 0.05821025, + 0.023852421, + 0.0023238708, + 0.059958983, + 0.045650728, + 0.0035823798, + 0.021182124, + 0.06536029, + 0.0023902277, + -0.026674217, + 0.0002469645, + 0.0020064032, + -0.06034399, + 0.040017728, + -0.049678437, + -0.0032678086, + -0.033326782, + 0.017452622, + -0.026135415, + -0.004004807, + -0.029187452, + 0.008761656, + -0.04633237, + -0.031040203, + 0.03361154, + 0.03364455, + 0.016584601, + 0.033674356, + 0.012560564, + -0.0359252, + -0.018261429, + -0.0010633499, + 0.048224416, + -0.05129638, + -0.055718843, + 0.016412761, + 0.019934708, + 0.014391434, + 0.0043129087, + 0.016390469, + -0.009737628, + -0.047240984, + -0.027559847, + 0.055247765, + -0.03220373, + -0.016151046, + 0.0485871, + -0.037485205, + -0.01835451, + -0.01517561, + 0.004869981, + -0.01780359, + -0.015432582, + -0.009408715, + -0.0071832985, + -0.029855747, + -0.012426293, + 0.005129185, + 0.025689391, + -0.06732369, + -0.04262489, + -0.014908167, + -0.05464126, + 0.0047209524, + 0.003995236, + 0.032822587, + -0.052573748, + 0.0352204, + 
0.09358622, + -0.02966806, + 0.046852604, + -0.042644933, + -0.023728022, + 0.04067723, + 0.027035205, + -0.014150344, + 0.0060548745, + 0.007615636, + -0.06135294, + 0.038593236, + 0.0020092153, + 0.0008044259, + -0.03532518, + -0.025208732, + -0.057940982, + 0.063368574, + -0.03239539, + 0.042998813, + 0.005380122, + -0.025621908, + 0.02933094, + 0.060402885, + 0.06707255, + -0.06290247, + 0.0044211885, + -0.034580726, + 0.018173682, + -0.014258836, + -0.0009336827, + -0.045159176, + -0.000609831, + 0.046511274, + 0.09704431, + 0.017784506, + -0.04735181, + 0.042557452, + -0.0006873186, + 0.0061028055, + -0.033874914, + 0.040295046, + 0.06600115, + 0.00991167, + -0.04475665, + 0.05955679, + 0.05559941, + -0.0021201232, + 0.008088177, + 0.0036764112, + 0.002953009, + 0.06759343, + -0.009915477, + -0.052873727, + -0.009668077, + 0.002044497, + -0.00063458836, + -0.03656217, + 0.054652866, + 0.03798574, + 0.056606956, + -0.007915265, + 0.0013049815, + -0.09499897, + -0.0070800385, + 0.0244362, + -0.012560818, + -0.0042640534, + -0.022324111, + 0.0035668353, + 0.053489763, + -0.0023222228, + -0.01696316, + -0.04065025, + -0.02098738, + 0.0114039155, + -0.016950222, + -0.007028829, + -0.022667225, + 0.02366999, + -0.05761968, + 0.025501445, + -0.06229779, + -0.050604578, + -0.06865873, + -0.024909278, + -0.03078067, + 0.017422339, + -0.04470559, + 0.02937445, + -0.0016233833, + -0.02238118, + -0.020390697, + 0.000878372, + 0.046922233, + -0.023016753, + 0.017631982, + 0.03728526, + 0.048234653, + -0.03094375, + 0.0164381, + 0.026422715, + 0.049812343, + -0.040939927, + -0.054622803, + -0.03708105, + 0.035311334, + 0.02719904, + 0.07242579, + 0.00034508843, + 0.036894504, + -0.04266779, + -0.070187844, + -0.051377587, + -0.007023316, + 0.057383943, + -0.018449614, + -0.020260822, + 0.0012650142, + -0.0075096413, + -0.0052665956, + 0.011430787, + -0.053528212, + 0.032891087, + 0.014585182, + 0.022210846, + 0.023262084, + -0.05662875, + 0.050923083, + -0.042420305, + 0.0149962185, + -0.031335566, + -0.025867553, + -0.0785983, + 0.009070857, + 0.020916311, + 0.049653318, + -0.0062730005, + 0.04681294, + 0.0012068546, + -0.03855772, + -0.035257522, + 0.04051459, + 0.04250193, + -0.045821767, + -0.005271129, + -0.007447701, + -0.043520868, + 0.07666238, + -0.009431352, + 0.010825085, + 0.004938816, + 0.07231181, + 0.0627917, + -0.0001364236, + 0.016336551, + -0.0049293903, + 0.0138295395, + -0.023893986, + -0.044587392, + -0.006986627, + -0.05745243, + -0.031931262 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 21, + "total_tokens": 21 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/47264d05c3ef.json b/tests/integration/recordings/responses/47264d05c3ef.json new file mode 100644 index 000000000..5534a925a --- /dev/null +++ b/tests/integration/recordings/responses/47264d05c3ef.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "machine learning and artificial intelligence" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.0055676526, + 0.037607595, + -0.14074987, + 
-0.002804985, + 0.07148354, + 0.025361888, + -0.006617389, + -0.008432862, + -0.027677476, + 0.033805065, + 0.012552972, + 0.041450765, + 0.13947411, + 0.04415726, + -0.018268242, + -0.010596744, + -0.05406684, + -0.023316454, + -0.01917343, + -0.007486475, + -0.008004426, + 0.025822539, + 0.015411618, + 0.018916113, + 0.07705309, + 0.0058656926, + -0.058034655, + -0.007960976, + 0.014135634, + 0.034185696, + 0.025762286, + -0.041148923, + 0.020820145, + -0.0036934123, + -0.059696127, + -0.048285812, + 0.09696554, + -0.006299937, + 0.02855948, + 0.036708932, + 0.004418546, + 0.033692554, + 0.00014569695, + -0.004598071, + 0.058664955, + 0.04386636, + -0.014703874, + -0.040981304, + 0.070256576, + -0.01631749, + 0.04358505, + -0.01474905, + 0.0053627864, + 0.020751968, + 0.076655865, + 0.011587456, + -0.026259147, + 0.0043378496, + 0.03386068, + -0.060910884, + 0.13739845, + 0.028939046, + -0.042746805, + 0.07966744, + 0.031755112, + -0.0031926725, + -0.0021385243, + 0.023516048, + 0.011488332, + 0.005949599, + -0.001006356, + -0.021689167, + 0.03777627, + 0.033713214, + -0.025795706, + -0.015380865, + -0.019959806, + -0.010755837, + -0.02877149, + 0.084691174, + 0.05146873, + -0.04077167, + 0.032549243, + -0.006378473, + 0.035918225, + -0.0093235485, + -0.08135541, + -0.01730062, + -0.010902666, + 0.10651181, + 0.02412386, + 0.03772865, + 0.05793197, + 0.011357906, + -0.010912312, + 0.0039970484, + -0.056139898, + 0.0001663857, + -0.049092147, + -0.03757449, + -0.06084076, + 0.021710595, + 0.016426036, + -0.046211846, + 0.047347162, + 0.021834597, + 0.0008032862, + -0.039862543, + -0.013690757, + 0.02270945, + -0.00546203, + 0.05374652, + -0.02116721, + -0.006679464, + -0.051961154, + -0.051756233, + -0.010277374, + -0.004740697, + 0.03921549, + 0.012441582, + 0.00071372476, + -0.04694471, + -0.008488195, + 0.005572887, + -0.012411736, + 0.043588247, + -0.049042385, + 0.024810083, + -0.011161265, + -0.04244215, + 0.039098956, + -0.0327504, + -0.02049274, + -0.006234103, + -0.025615763, + 0.0863854, + -0.053460903, + -0.05029799, + 0.035151068, + 0.037194397, + 0.01927741, + 0.024714334, + -0.0025672915, + -0.0139264995, + -0.026953243, + -0.024757806, + 0.027785258, + 0.029920481, + -0.09716015, + 0.030207563, + 0.00088082976, + 0.052972272, + -0.028489286, + -0.013131309, + 0.022434616, + 0.00065314706, + -0.055729564, + -0.0057886294, + 0.038754933, + -0.012502802, + 0.033816766, + -0.026282853, + -0.023173656, + 0.028089669, + -0.0050990237, + -0.0082897, + 0.026175315, + 0.0375448, + 0.027376607, + 0.020405287, + -0.043161266, + 0.0006997121, + 0.00033588792, + 0.014482382, + 0.062248748, + 0.009971126, + -0.017957326, + -0.083549835, + 0.04807994, + -0.050247118, + 0.031104453, + -0.04614943, + 0.02402854, + 0.03376869, + -0.0019501477, + -0.036129188, + -0.039748054, + -0.0029756199, + -0.03683378, + -0.030606419, + -0.020958807, + 0.021332651, + -0.020598978, + -0.042064365, + -0.054918192, + -0.00901248, + 0.022193708, + 0.009651182, + 0.01736177, + -0.034221455, + -0.0044257627, + -0.03959286, + -0.056846857, + -0.023341974, + -0.036591545, + 0.05263008, + 0.027988793, + 0.00053739984, + -0.017889682, + 0.00032725866, + 0.05651838, + 0.03722038, + 0.021961791, + -0.015104896, + -0.027406182, + -0.0062658424, + -0.0077742916, + -0.04878277, + 0.013014594, + -0.029580545, + 0.053123508, + -0.0060568117, + 0.02311685, + -0.017863069, + 0.0057518133, + 0.013460052, + -0.034497164, + -0.009695958, + -0.054542456, + 0.03457276, + -0.019900212, + -0.04496697, + 0.07930227, + 
0.00061430456, + 0.030719148, + 0.020608494, + 0.017646661, + 0.055049658, + 0.008732203, + 0.035740122, + -0.022534488, + 0.057636857, + -0.02430445, + 0.011238781, + -0.056625325, + -0.031212583, + 0.010821367, + -0.042455893, + 0.019988628, + 0.025999557, + -0.02078072, + 0.027336553, + -0.032524664, + 0.019674964, + 0.004634663, + -0.027575325, + 0.006920462, + 0.00849185, + 0.0072606583, + 0.010830559, + 0.04373721, + -0.041281823, + 0.034703884, + -0.0070332997, + 0.02627788, + -0.008117525, + -0.0050063096, + 0.0006726745, + 0.013789757, + 0.007871836, + 0.020251142, + 0.023514729, + 0.04301568, + -0.001550706, + -0.006054088, + 0.029966662, + -0.004359033, + -0.028079243, + -0.013859538, + -0.017065715, + -0.056285594, + -0.030364485, + -0.067502774, + -0.028567376, + -0.0036689844, + 0.013287284, + 0.014196438, + 0.02717507, + 0.01529897, + 0.04067955, + 0.021112315, + 0.017248038, + -0.024668692, + -0.007050553, + -0.02688864, + 0.038015496, + 0.03523187, + 0.03283678, + 0.037456103, + -0.045826677, + 0.032901708, + -0.00715299, + 0.0734337, + 0.0036020123, + 0.050221503, + -0.022508303, + -0.0161466, + -0.014337791, + 0.039818697, + 0.012658511, + -0.06732133, + 0.0023105624, + 0.013785315, + 0.005420772, + 0.0023928639, + -0.010279525, + -0.042494286, + 0.019604988, + 0.0419654, + 0.010014578, + 0.0131692225, + -0.08502757, + -0.06022765, + -0.012788984, + 0.029492218, + 0.07531082, + -0.0014149746, + 0.015584036, + -0.04072224, + -0.035372414, + 0.015036397, + 0.023529893, + 0.018885048, + -0.022172105, + -0.06258309, + -0.003607014, + 0.028332703, + 0.0071907504, + -0.012343301, + 0.023307528, + 0.057685107, + -0.0027828452, + 0.004447051, + -0.01735233, + -0.016245272, + 0.013801741, + -0.0029756557, + -0.013213782, + 0.015396319, + -0.010235075, + -0.03276548, + 0.021457301, + 0.023885816, + 0.004579841, + 0.036322046, + 0.0031928096, + 0.017268742, + 0.06310177, + 0.044325467, + -0.007820684, + 0.027840687, + -0.055998452, + 0.015811397, + -0.027679825, + -0.01689621, + -0.015704138, + 0.02220624, + 0.0036319862, + 0.016407188, + -0.0028235482, + 0.05849856, + -0.008090543, + -0.0037728718, + 0.06077582, + -0.027032267, + 0.018484741, + -0.055906855, + -0.04504379, + -0.03492977, + -0.019317614, + -0.041188404, + 0.030125722, + -0.025321875, + 0.006913241, + 0.038495496, + -0.012324868, + 0.0005036001, + -0.040139947, + -0.0061344374, + 0.0005219825, + -0.018869184, + -0.014752749, + -0.07595433, + -0.018194932, + 0.012401524, + -0.027864115, + 0.006789087, + -0.009565956, + 0.015790598, + 0.046612665, + -0.04252712, + -0.021846049, + -0.005723392, + -0.048730128, + -0.015873676, + -0.011065935, + -0.047783904, + -0.03550279, + 0.06778763, + 0.020498566, + 0.024177074, + 0.01025881, + 7.263766e-06, + -0.06263741, + 0.024666198, + -0.05690874, + 0.021188669, + 0.017749513, + -0.05817258, + 0.010562816, + 0.030943366, + 0.0007343872, + -0.016273286, + 0.00787693, + -0.036151744, + 0.014707449, + 0.01039333, + 0.050455544, + 0.004762857, + -0.040837612, + 0.063730456, + -0.017636815, + -0.025875637, + -0.034493577, + -0.00932124, + 0.045578275, + 0.0021959038, + 0.02683857, + 0.020068243, + 0.02964936, + 0.03125028, + -0.03228684, + -0.03409907, + -0.018953461, + 0.032556947, + 0.121822715, + 0.04707043, + -0.020557143, + -0.07898298, + 0.03803513, + 0.009371626, + 0.011706999, + 0.023257945, + 0.0077813817, + 0.06505699, + -0.022636045, + -0.01171062, + 0.030803725, + 0.03876063, + 0.038833153, + 0.011656127, + 0.031124521, + -0.06297426, + 0.020178674, + -0.022308672, + 
-0.012454079, + -0.0018501335, + -0.025267268, + 0.03139099, + 0.06506641, + -0.006600023, + 0.03257224, + 0.038939405, + -0.03932672, + -0.011354874, + 0.013061634, + -0.025645908, + -0.03807022, + 0.031546343, + 0.054272447, + 0.0042550326, + -0.06261923, + -0.007274197, + -0.03840224, + -0.013757855, + 0.03581693, + -0.0064127482, + 0.02441153, + 0.0042232205, + -0.03191279, + 0.043696977, + 0.008361217, + 0.01741963, + -0.04443982, + -0.07408706, + -0.0302928, + -0.10016659, + 0.025746375, + 0.01681544, + 0.008698005, + -0.0004667209, + 0.0087767, + -0.021100726, + 0.003711238, + -0.023373105, + -0.01503881, + 0.04967642, + -0.0930721, + -0.046552327, + 0.09804994, + -0.013835043, + -0.0037497964, + 0.039764475, + 0.033894103, + 0.0012048046, + -0.037988536, + 0.041074146, + 0.04235108, + -0.08400901, + -0.018685354, + 0.07228467, + -0.010743437, + 0.010808383, + 0.009577177, + -0.033949137, + -0.006326134, + 0.026234496, + -0.041013833, + 0.038343027, + 0.00084823865, + 0.02851006, + 0.0077916514, + -0.030147677, + -0.027760647, + 0.004643397, + 0.005053343, + -0.008941861, + -0.026913425, + 0.042983938, + 0.01717477, + 0.0663102, + -0.0019370201, + 0.003287294, + -0.03727856, + 0.0035034667, + -0.013155771, + -0.007892782, + 0.041945223, + -0.0030665628, + -0.094774075, + 0.034818046, + -0.036818203, + -0.0029307893, + -0.00884741, + -0.00743541, + -0.009145366, + -0.021448582, + -0.042497415, + -0.006537858, + 0.0023786393, + -0.03640427, + 0.0031237768, + 0.06756371, + -0.015007449, + -0.045269705, + 0.025938397, + -0.0102713555, + -0.02172098, + 0.0008311765, + 0.032281272, + 0.028380793, + -0.055843204, + 0.0016028135, + 0.008903928, + 0.0085764015, + -0.014910333, + -0.014104748, + -0.018106278, + -0.037222672, + -0.022182018, + 0.08024584, + -0.06451804, + -0.02075624, + 0.020843761, + 0.03523371, + 0.012193457, + -0.05703897, + -0.0013516175, + 0.04106061, + -0.06275497, + -0.018204994, + 0.02172471, + -0.014526833, + -0.054614007, + -0.04518983, + 0.016957235, + -0.023265226, + -0.027596308, + -0.023523336, + -0.059039053, + 0.0041685067, + -0.039938442, + 0.04669978, + -0.0063979127, + 0.020483416, + 0.027639873, + -0.01206512, + 0.051813617, + 0.049028568, + 0.0068901125, + -0.035108544, + -0.011231821, + -0.014607724, + 0.014760893, + 0.055028442, + -0.035556052, + 0.042438332, + -0.093893364, + -0.087567605, + -0.016325593, + -0.052629195, + -0.07636775, + 0.032836746, + -0.015486794, + 0.052163288, + -0.0035887335, + 0.0029697292, + -0.015571485, + 0.016206617, + 0.06955324, + -0.018355895, + 0.051770963, + 0.016798811, + -0.04840591, + -0.027142415, + 0.007742883, + -0.01505668, + 0.01949886, + 0.027084991, + 0.07451987, + 0.01707506, + -0.009305742, + -0.031197278, + 0.034334995, + 0.03400155, + -0.023167107, + 0.041818704, + 0.08864219, + -0.010490497, + -0.015371323, + 0.039439347, + 0.041599363, + 0.010343794, + -0.031765327, + -0.043507814, + 0.046278544, + 0.0073079155, + -0.012219337, + 0.009139992, + -0.02176212, + -0.021882698, + 0.0134527, + 0.0050208997, + -0.008423276, + 0.041090664, + -0.020635158, + -0.036146075, + 0.01049579, + -0.079392806, + -0.06501304, + 0.0335013, + -0.012802067, + 0.024089638, + -0.04123427, + -0.005093254, + 0.04965449, + 0.01900141, + 0.02468455, + -0.026793627, + -0.00853688, + -0.026478257, + -0.021256402, + 0.019811329, + -0.02736609, + 0.0008755891, + -0.03280057, + 0.05230071, + -0.024271186, + 0.017648304, + -0.07038161, + -0.024559036, + -0.07172936, + -0.01706447, + -0.006269835, + -0.014418907, + 0.033071198, + 
-0.039413814, + 0.028617091, + 0.05658568, + 0.0631377, + -0.011613074, + 0.045226514, + 0.03267759, + 0.04698377, + -0.054020163, + 0.004418562, + 0.007869039, + 0.03307921, + -0.01226311, + -0.021438342, + -0.015542127, + 0.017207818, + -0.023682194, + 0.08018181, + -0.022875395, + -0.01348799, + -0.028109841, + -0.0451768, + -0.023686612, + 0.040311582, + 0.04083543, + -0.03210762, + -0.03917693, + -0.017097685, + -0.036972158, + -0.04078481, + 0.02192485, + -0.026830912, + -0.011077901, + 0.0045215045, + 0.023708722, + -0.024511881, + -0.048116196, + 0.005063682, + -0.0072107734, + 0.019443877, + -0.056393813, + -0.018381938, + -0.046558794, + 0.011450821, + -0.010548083, + 0.0033412941, + 0.04300793, + 0.023570552, + 0.011047298, + -0.025875632, + -0.013352994, + 0.05174488, + 0.021105226, + -0.01785354, + -0.0063682324, + 0.01556173, + -0.05248805, + 0.01078658, + -0.017563447, + 0.038102563, + -0.030159717, + 0.07094031, + 0.12957932, + -0.009026436, + 0.038504194, + -0.058084693, + 0.01352246, + -0.017025255, + -0.028957661, + 0.015611035, + -0.06158929, + -0.0005010816 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 5, + "total_tokens": 5 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/48d2fb183a2a.json b/tests/integration/recordings/responses/48d2fb183a2a.json deleted file mode 100644 index 1b5ee286c..000000000 --- a/tests/integration/recordings/responses/48d2fb183a2a.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. Michael Jordan was born in 1963. 
He played basketball for the Chicago Bulls for 15 seasons.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nPlease give me information about Michael Jordan.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nPlease respond in JSON format with the schema: {\"$defs\": {\"NBAStats\": {\"properties\": {\"year_for_draft\": {\"title\": \"Year For Draft\", \"type\": \"integer\"}, \"num_seasons_in_nba\": {\"title\": \"Num Seasons In Nba\", \"type\": \"integer\"}}, \"required\": [\"year_for_draft\", \"num_seasons_in_nba\"], \"title\": \"NBAStats\", \"type\": \"object\"}}, \"properties\": {\"first_name\": {\"title\": \"First Name\", \"type\": \"string\"}, \"last_name\": {\"title\": \"Last Name\", \"type\": \"string\"}, \"year_of_birth\": {\"title\": \"Year Of Birth\", \"type\": \"integer\"}, \"nba_stats\": {\"$ref\": \"#/$defs/NBAStats\"}}, \"required\": [\"first_name\", \"last_name\", \"year_of_birth\", \"nba_stats\"], \"title\": \"AnswerFormat\", \"type\": \"object\"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "format": { - "$defs": { - "NBAStats": { - "properties": { - "year_for_draft": { - "title": "Year For Draft", - "type": "integer" - }, - "num_seasons_in_nba": { - "title": "Num Seasons In Nba", - "type": "integer" - } - }, - "required": [ - "year_for_draft", - "num_seasons_in_nba" - ], - "title": "NBAStats", - "type": "object" - } - }, - "properties": { - "first_name": { - "title": "First Name", - "type": "string" - }, - "last_name": { - "title": "Last Name", - "type": "string" - }, - "year_of_birth": { - "title": "Year Of Birth", - "type": "integer" - }, - "nba_stats": { - "$ref": "#/$defs/NBAStats" - } - }, - "required": [ - "first_name", - "last_name", - "year_of_birth", - "nba_stats" - ], - "title": "AnswerFormat", - "type": "object" - }, - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:40.283084Z", - "done": true, - "done_reason": "stop", - "total_duration": 2900042958, - "load_duration": 83372125, - "prompt_eval_count": 259, - "prompt_eval_duration": 352890750, - "eval_count": 60, - "eval_duration": 2462885208, - "response": "{\n \"first_name\": \"Michael\",\n \"last_name\": \"Jordan\",\n \"year_of_birth\": 1963,\n \"nba_stats\": {\n \"year_for_draft\": 1984,\n \"num_seasons_in_nba\": 15\n }\n}", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/4ebf08272d17.json b/tests/integration/recordings/responses/4ebf08272d17.json index 958d3ad9c..87cd4f5ca 100644 --- a/tests/integration/recordings/responses/4ebf08272d17.json +++ b/tests/integration/recordings/responses/4ebf08272d17.json @@ -21,7 +21,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -36,7 +36,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282604, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -47,7 +47,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -62,7 +62,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282605, "model": 
"llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -73,7 +73,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -88,7 +88,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282605, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -99,7 +99,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -114,7 +114,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282605, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -125,7 +125,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -140,7 +140,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282605, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -151,7 +151,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -166,7 +166,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282605, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -177,7 +177,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -192,7 +192,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282606, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -203,7 +203,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -218,7 +218,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282606, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -229,7 +229,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -244,7 +244,7 @@ "logprobs": null } ], - "created": 1759267476, + "created": 1759282606, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -255,7 +255,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -270,7 +270,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282606, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -281,7 +281,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -296,7 +296,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282606, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -307,7 +307,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": 
"chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -322,7 +322,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282607, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -333,7 +333,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -348,7 +348,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282607, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -359,7 +359,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -374,7 +374,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282607, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -385,7 +385,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -400,7 +400,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282607, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -411,11 +411,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " suggest", + "content": " give", "function_call": null, "refusal": null, "role": "assistant", @@ -426,7 +426,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282608, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -437,85 +437,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " some", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267477, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " ways", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267477, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " for", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267477, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", 
+ "id": "chatcmpl-945", "choices": [ { "delta": { @@ -530,7 +452,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282608, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -541,11 +463,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " to", + "content": " an", "function_call": null, "refusal": null, "role": "assistant", @@ -556,7 +478,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282608, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -567,11 +489,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " find", + "content": " overview", "function_call": null, "refusal": null, "role": "assistant", @@ -582,7 +504,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282608, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -593,11 +515,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " out", + "content": " of", "function_call": null, "refusal": null, "role": "assistant", @@ -608,7 +530,7 @@ "logprobs": null } ], - "created": 1759267477, + "created": 1759282608, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -619,111 +541,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " the", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267477, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " current", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267477, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267477, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " in", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - 
"index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -738,7 +556,7 @@ "logprobs": null } ], - "created": 1759267478, + "created": 1759282608, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -749,1983 +567,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ":\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "1", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Check", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " online", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - 
"created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " websites", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " You", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " can", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " check", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " websites", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " like", - "function_call": null, - "refusal": null, - 
"role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Acc", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "u", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267478, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "Weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".com", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": 
"chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " or", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Japan", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Meteor", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "ological", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Agency", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, 
- { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "J", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "MA", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " for", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " the", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267479, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " current", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": 
"chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " conditions", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " forecast", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " in", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Tokyo", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "2", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 
0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Use", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " a", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " mobile", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " app", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " There", - "function_call": null, - 
"refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " are", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267480, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " many", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " mobile", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " apps", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " available", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " that", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - 
"__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " provide", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " real", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "-time", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " information", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " such", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": 
"fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " as", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Dark", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Sky", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267481, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Underground", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": 
"llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " or", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Japan", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "-based", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " apps", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " like", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Japan", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Meteor", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": 
null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "ological", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Corporation", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -2740,7 +582,7 @@ "logprobs": null } ], - "created": 1759267482, + "created": 1759282609, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -2751,11 +593,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " (", + "content": " typical", "function_call": null, "refusal": null, "role": "assistant", @@ -2766,7 +608,7 @@ "logprobs": null } ], - "created": 1759267482, + "created": 1759282609, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -2777,995 +619,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "JM", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "Cor", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "ps", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - 
"created": 1759267482, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " App", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "3", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Check", - "function_call": null, - "refusal": null, - "role": "assistant", 
- "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " social", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " media", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Many", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " airlines", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ 
- { - "delta": { - "content": " airports", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " tourist", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267483, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " attractions", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " also", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " share", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " the", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " current", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " conditions", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " on", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " their", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " social", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": 
"chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " media", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " accounts", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ".\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "Please", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " note", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " that", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Tokyo", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - 
"index": 0, - "logprobs": null - } - ], - "created": 1759267484, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "'s", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267485, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3780,7 +634,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282609, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3791,11 +645,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " is", + "content": " and", "function_call": null, "refusal": null, "role": "assistant", @@ -3806,7 +660,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282609, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3817,7 +671,449 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " suggest", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282609, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " some", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282610, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " resources", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282610, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " where", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 
1759282610, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282610, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282610, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " find", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282611, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " up", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282611, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "-to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282611, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "-date", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282611, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + 
"tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282611, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " forecasts", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282612, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282612, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "Tok", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282612, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "yo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282612, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " has", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282613, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282613, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", "choices": [ { 
"delta": { @@ -3832,7 +1128,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282613, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3843,7 +1139,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3858,7 +1154,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282613, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3869,7 +1165,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3884,7 +1180,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282613, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3895,7 +1191,33 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " climate", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282614, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3910,7 +1232,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282614, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3921,7 +1243,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3936,7 +1258,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282614, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3947,7 +1269,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3962,7 +1284,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282614, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3973,7 +1295,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -3988,7 +1310,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282614, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -3999,7 +1321,293 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282614, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + 
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Here", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282615, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "'s", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282615, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282615, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " general", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282615, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " idea", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282616, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " of", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282616, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " what", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282616, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282616, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282616, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " expect", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282616, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -4014,7 +1622,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282617, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4025,11 +1633,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": "-", + "content": "*", "function_call": null, "refusal": null, "role": "assistant", @@ -4040,7 +1648,7 @@ "logprobs": null } ], - "created": 1759267485, + "created": 1759282617, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4051,813 +1659,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Winter", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267485, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267485, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, 
- { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "December", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267485, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267485, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " February", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267485, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "):", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Mild", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " temperatures", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - 
"object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " with", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " average", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " highs", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " around", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "9", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - 
"index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "48", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " lows", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267486, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " around", - "function_call": 
null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " -", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "2", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "28", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { 
- "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ").\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "-", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267487, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -4872,7 +1674,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282617, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4883,7 +1685,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -4898,7 +1700,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282617, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4909,7 +1711,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -4924,7 +1726,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282617, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4935,7 +1737,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -4950,7 +1752,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282618, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4961,7 +1763,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -4976,7 +1778,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282618, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -4987,7 +1789,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5002,7 +1804,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282618, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5013,11 +1815,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " Cool", + "content": " Mild", "function_call": null, "refusal": null, "role": "assistant", @@ -5028,7 +1830,7 @@ "logprobs": null } ], - "created": 1759267487, + 
"created": 1759282618, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5039,11 +1841,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " temperature", + "content": " temperatures", "function_call": null, "refusal": null, "role": "assistant", @@ -5054,7 +1856,7 @@ "logprobs": null } ], - "created": 1759267487, + "created": 1759282618, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5065,11 +1867,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": ",", + "content": " ranging", "function_call": null, "refusal": null, "role": "assistant", @@ -5080,7 +1882,7 @@ "logprobs": null } ], - "created": 1759267488, + "created": 1759282619, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5091,11 +1893,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": " with", + "content": " from", "function_call": null, "refusal": null, "role": "assistant", @@ -5106,7 +1908,7 @@ "logprobs": null } ], - "created": 1759267488, + "created": 1759282619, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5117,85 +1919,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " average", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " highs", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " around", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5210,7 +1934,7 @@ "logprobs": null } ], - "created": 1759267488, + "created": 1759282619, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5221,11 +1945,11 
@@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { - "content": "18", + "content": "10", "function_call": null, "refusal": null, "role": "assistant", @@ -5236,7 +1960,7 @@ "logprobs": null } ], - "created": 1759267488, + "created": 1759282619, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5247,7 +1971,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5262,7 +1986,7 @@ "logprobs": null } ], - "created": 1759267488, + "created": 1759282619, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5273,475 +1997,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "64", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - 
"id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " lows", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " around", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "8", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267488, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "46", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - 
{ - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": ").\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "-", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " Summer", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", - "choices": [ - { - "delta": { - "content": "June", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1759267489, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5756,7 +2012,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282620, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5767,7 +2023,709 @@ { "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282620, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "20", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282620, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0C", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282620, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282620, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "50", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282621, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282621, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282621, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282621, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "68", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282621, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282622, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ").", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282622, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Cherry", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282622, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " bloss", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282622, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "oms", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 1759282622, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " bloom", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282623, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282623, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " late", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282623, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " March", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282623, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282623, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " early", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282624, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " April", + "function_call": null, + 
"refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282624, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282624, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "*", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282624, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Summer", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282624, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282625, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "June", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282625, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282625, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": 
"chatcmpl-945", "choices": [ { "delta": { @@ -5782,7 +2740,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282625, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5793,7 +2751,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5808,7 +2766,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282625, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5819,7 +2777,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5834,7 +2792,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282626, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5845,7 +2803,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5860,7 +2818,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282626, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5871,7 +2829,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5886,7 +2844,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282626, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5897,7 +2855,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5912,7 +2870,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282626, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5923,7 +2881,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5938,7 +2896,7 @@ "logprobs": null } ], - "created": 1759267489, + "created": 1759282626, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5949,7 +2907,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5964,7 +2922,7 @@ "logprobs": null } ], - "created": 1759267490, + "created": 1759282627, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5975,7 +2933,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -5990,7 +2948,7 @@ "logprobs": null } ], - "created": 1759267490, + "created": 1759282627, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -6001,7 +2959,4271 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-359", + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " around", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + 
"finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282627, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282627, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "30", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282627, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0C", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282628, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282628, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "86", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282628, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282628, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": 
").\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282628, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "*", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282629, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Autumn", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282629, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282629, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "September", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282629, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282629, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " November", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282630, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "):", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282630, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Comfort", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282630, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "able", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282630, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " temperatures", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282630, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " between", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282631, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282631, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "10", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282631, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0C", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282631, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282631, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282632, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "20", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282632, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0C", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282632, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282632, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "50", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 1759282632, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282633, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282633, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282633, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "68", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282633, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282633, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ").", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282634, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Leaves", + "function_call": null, + 
"refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282634, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " change", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282634, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " colors", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282634, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282634, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " October", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282635, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282635, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "*", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282635, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { 
+ "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Winter", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282635, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282635, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "December", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282636, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282636, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " February", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282636, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "):", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282636, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Cool", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282636, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": 
null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " temperatures", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282637, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ranging", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282637, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " from", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282637, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " -", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282637, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "5", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282637, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0C", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282638, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282638, + "model": 
"llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282638, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "10", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282638, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0C", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282638, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282638, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "23", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282639, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282639, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + 
"finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282639, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " ", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282639, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "50", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282639, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "\u00b0F", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282640, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ").\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282640, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "For", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282640, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " more", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282640, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " 
accurate", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282640, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282641, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " up", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282641, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "-to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282641, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "-date", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282641, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " information", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282641, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282642, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282642, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282642, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " check", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282642, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282642, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " following", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282643, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " resources", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282643, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ":\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282643, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "1", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282643, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282643, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Japan", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282644, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Meteor", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282644, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "ological", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282644, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Agency", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282644, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 1759282644, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "J", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282645, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "MA", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282645, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "):", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282645, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Provides", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282645, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " current", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282645, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282646, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " conditions", + "function_call": 
null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282646, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282646, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " forecasts", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282646, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282646, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Tokyo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282647, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282647, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "2", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282647, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + 
"__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282647, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Acc", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282647, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "u", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282648, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "Weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282648, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ":", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282648, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Offers", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282648, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " detailed", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282648, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + 
"usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " forecasts", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282649, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282649, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " including", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282649, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " current", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282649, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " temperature", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282649, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282650, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " humidity", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282650, + "model": 
"llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282650, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " wind", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282650, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " speed", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282650, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282651, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282651, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " precipitation", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282651, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " chances", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": 
null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282651, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282651, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "3", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282652, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282652, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282652, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".com", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282652, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ":", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282652, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + 
"content": " Features", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282653, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " real", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282653, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "-time", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282653, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282653, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " updates", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282653, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282654, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " forecasts", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282654, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282654, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282654, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " radar", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282654, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " imagery", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282655, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282655, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " Tokyo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282655, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282655, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": "Please", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282655, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " note", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282655, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " that", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282656, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " these", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282656, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " resources", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282656, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " might", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282656, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " require", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + 
"index": 0, + "logprobs": null + } + ], + "created": 1759282657, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282657, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282657, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " log", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282657, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282657, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " or", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282658, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " accept", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282658, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " cookies", + 
"function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282658, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282658, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " access", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282658, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282659, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " most", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282659, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " accurate", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282659, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": " information", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282659, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282659, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-945", "choices": [ { "delta": { @@ -6016,7 +7238,7 @@ "logprobs": null } ], - "created": 1759267490, + "created": 1759282660, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/50340cd4d253.json b/tests/integration/recordings/responses/50340cd4d253.json index 3101fa9d8..8ffa6e124 100644 --- a/tests/integration/recordings/responses/50340cd4d253.json +++ b/tests/integration/recordings/responses/50340cd4d253.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:38:01.239743Z", + "created_at": "2025-09-30T17:39:23.766462922Z", "done": true, "done_reason": "stop", - "total_duration": 207264667, - "load_duration": 73437959, + "total_duration": 2859320770, + "load_duration": 60934847, "prompt_eval_count": 216, - "prompt_eval_duration": 121657333, + "prompt_eval_duration": 2749991822, "eval_count": 2, - "eval_duration": 11348417, + "eval_duration": 47816462, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/5357765a9ac9.json b/tests/integration/recordings/responses/5357765a9ac9.json new file mode 100644 index 000000000..ce4bd773a --- /dev/null +++ b/tests/integration/recordings/responses/5357765a9ac9.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "This is a test file 0" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.06569889, + 0.0075979824, + -0.13355534, + -0.03087419, + 0.06887596, + 0.0022278922, + 0.030457113, + 0.029343065, + -0.041988637, + -0.085280016, + -0.030396713, + 0.038043153, + 0.025799021, + 0.0029713905, + -0.028386902, + -0.027477825, + 0.03623284, + -0.04154503, + 0.00551161, + -0.020107845, + 0.036813777, + -0.029126925, + -0.06819024, + -0.006683371, + 0.12236409, + -0.0008511646, + -0.022556255, + 0.051949136, + -0.07988408, + -0.032928497, + 0.06524479, + 0.0012762198, + -0.002292936, + -0.029198533, + -0.012377746, + -0.026174542, + 0.021895576, + 0.037113264, + 0.03436928, + 0.008258402, + -0.016730672, + -0.025307849, + 0.0068733217, + -0.0034135508, + 0.020250086, + 0.03329193, + 0.012187189, + 0.076113224, + -0.019928403, + 0.012776066, + 0.007209404, + -0.022850547, + -0.0030079158, + 0.01193757, + 0.02421511, + -0.014447408, + -0.03570278, + -0.0005199167, + -0.021498382, + -0.03273841, + 0.041634835, + 0.0357598, + -0.051809516, + 0.04717076, + 0.014142166, + -0.044218663, + -0.04686818, + 0.024508895, + 0.0016807343, + 0.03689631, + 
0.06549316, + -0.011174818, + -0.021753127, + 0.0125305895, + -0.018603666, + -0.049111377, + -0.010490791, + -0.06439277, + -0.06457874, + -0.027793122, + 0.012108071, + 0.02228997, + 0.023145016, + 0.064356215, + 0.06162452, + -0.023461625, + -0.011763129, + -0.017237727, + 0.016087933, + 0.026915565, + 0.048432816, + 0.019608956, + 0.0446655, + -0.042998426, + -0.022571366, + -0.010334031, + 0.022279797, + 0.07883467, + -0.011191799, + -0.026524613, + 0.0013984819, + 0.005972282, + 0.027293874, + -0.02065833, + 0.0285912, + 0.049571536, + -0.020621926, + 0.008375827, + -0.04923765, + -0.010991332, + 0.0071697976, + 0.050934322, + -0.043111023, + -0.033160962, + -0.015131605, + -0.012539622, + 0.041305505, + -0.033541363, + -0.041694295, + 0.011190744, + 0.007084672, + 0.015450092, + 0.042311884, + 0.03940029, + 0.01701689, + 0.013807599, + -0.04999148, + 0.0504365, + 0.024707705, + -0.04813005, + -0.020354733, + 0.024809042, + -0.038834315, + -0.033733364, + 0.028245933, + 0.0424937, + -0.013269442, + -0.025089223, + -0.02546163, + 0.020151038, + -0.042214695, + 0.0058155754, + 0.02213424, + 0.017433757, + 0.05158181, + -0.02869754, + 0.04465606, + 0.012662332, + -0.028051574, + 0.015604842, + 0.050896738, + 0.007599799, + 0.006281129, + 0.033418793, + 0.021920709, + -0.07913975, + 0.033958323, + -0.02553707, + 0.0044211005, + 0.051474363, + 0.028896896, + -0.013811369, + -0.015269997, + -0.0027181397, + -0.074844725, + -0.04378042, + 0.013777917, + 0.0941123, + 0.084751636, + -0.012578452, + -0.014671592, + -0.038143005, + -0.004176015, + 0.007933388, + -0.05929473, + -0.021193247, + 0.008781839, + -0.01596112, + 0.026119918, + -0.025445312, + 0.02648552, + -0.00568644, + 0.010799765, + 0.023444891, + -0.009518018, + -0.050896112, + 0.01034954, + -0.02753636, + -0.03769859, + -0.03366245, + -0.009905339, + -0.045516003, + -0.068003535, + -0.07863914, + 0.005519929, + -0.042954993, + -0.022231326, + -0.021004673, + 0.02902556, + -0.017120933, + 0.021249624, + 0.02768383, + -0.06314554, + 0.053207308, + -0.03886009, + 0.00476874, + -0.022096757, + -0.01341045, + -0.030357309, + 0.0137588475, + 0.031562295, + -0.005539913, + -0.032822832, + 0.034190398, + 0.055425715, + -0.027244035, + 0.006620907, + -0.022488393, + -0.026812593, + -0.027873514, + 0.018166311, + 0.003122373, + 0.0018363056, + -0.027016325, + 0.0046166135, + -0.0369997, + -0.034971904, + -0.018800624, + -0.0014946542, + -0.011367924, + 0.0035812103, + -0.07085738, + 0.033152454, + 0.023359593, + -0.027913084, + -0.0077732382, + -0.048488766, + 0.053926837, + -0.039162364, + 0.044420574, + -0.021989806, + 0.055259187, + -0.016539602, + -0.018407907, + 0.007724413, + -0.020046087, + -0.023352552, + -0.047689717, + 0.04136404, + 0.042082027, + -0.017346364, + 0.029248353, + 0.031323876, + 0.07688728, + -0.013567599, + -0.014497512, + -0.009294345, + -0.039481603, + -0.004710669, + -0.07827626, + 0.026850224, + -0.0140288705, + 0.02613264, + -0.0044927574, + -0.03384218, + -0.00079161214, + -0.056953214, + 0.03628688, + -0.020171795, + -0.012991032, + -0.013236439, + 0.0482173, + -0.0035148757, + -0.011471772, + 0.026540088, + -0.031246386, + 0.054621194, + 0.059837423, + 0.0044686636, + 0.044278976, + -0.007069389, + -0.008574732, + 0.005789034, + 0.026414782, + -0.0075685466, + -0.014385823, + 0.02829211, + 0.017918091, + 0.038316578, + 0.009408247, + -0.013512078, + 0.022944227, + -0.0155690005, + 0.0043662353, + 0.024858288, + 0.035380267, + 0.044127665, + -0.0147769265, + -0.0063019125, + 0.0031974213, + -0.012091373, + 
0.02103759, + 0.035669435, + -0.013142072, + 0.022677507, + -0.06280885, + 0.038994793, + -0.047527548, + 0.010609448, + 0.043443497, + -0.09725285, + -0.018532714, + -0.028497247, + 0.030204087, + -0.006363635, + 0.060399804, + -0.0107133705, + 0.008450749, + 0.05759074, + -0.04678292, + 0.01396999, + -0.07399043, + 0.0007504193, + 0.031175617, + 0.0060865046, + 0.03421212, + 0.023408618, + 0.043368008, + -0.05970366, + -0.014861325, + 0.053525794, + 0.04850931, + -0.029100617, + -0.027497835, + 0.044973027, + 0.0405099, + 0.00850536, + 0.047304627, + -0.0038067936, + 0.061405297, + 0.03626454, + 0.018543653, + 0.0150030125, + 0.014765505, + 0.012231581, + -0.029379906, + -0.019150946, + 0.019597163, + -0.007974375, + 0.05469681, + -0.0018450669, + 0.03555379, + 0.022403168, + -0.022159277, + 0.039409384, + -0.00950375, + 0.015302587, + -0.002742015, + 0.049243126, + -0.014761497, + 0.028783482, + -0.021339092, + -0.0126494095, + -0.029378537, + 0.027175143, + 0.020410776, + -0.048842303, + 0.012824888, + 0.07513209, + 0.02679242, + -0.014250363, + -0.03768017, + 0.041978676, + 0.06390848, + 0.027395684, + 0.012390605, + -0.068697326, + -0.026561985, + -0.013103001, + 0.05081568, + 0.056574605, + -0.03550072, + -0.0033409016, + 0.041807074, + 0.026001278, + -0.014371649, + 0.03813918, + -0.019380845, + 0.058272604, + 0.031092493, + 0.0054262243, + 0.036123812, + -0.048604775, + 0.025506865, + -0.00573351, + 0.010888976, + 0.044062544, + -0.0073227165, + -0.06031213, + 0.02233619, + -0.011185928, + -0.020654337, + 0.0056568985, + 0.008660892, + -0.02760251, + 0.012655247, + -0.045171466, + -0.045431744, + 0.039053343, + -0.02334073, + 0.051499687, + -0.037237596, + -0.036204305, + -0.0661045, + 0.022786478, + 0.04503965, + 0.042866375, + 0.049955808, + -0.0158006, + -0.006718668, + 0.016262004, + 0.036782544, + 0.030297246, + -0.026872655, + -0.031357024, + 0.008424332, + 0.040544927, + 0.054497696, + 0.0003742172, + -0.09587798, + -0.016308863, + 0.011799034, + -0.0055135977, + 0.014207488, + -0.016967725, + 0.08251366, + -0.011782458, + -0.0080608055, + -0.016523587, + 0.04005391, + 0.04516666, + -0.049395572, + -0.016308561, + 0.006028617, + -0.040751286, + 0.14053217, + 0.10381706, + -0.07738247, + -0.044793732, + -0.008966316, + -0.02844784, + 0.021164771, + -0.03330297, + -0.012639106, + 0.037983377, + -0.013894287, + 0.029972676, + -0.03384708, + -0.008776539, + 0.033346817, + -0.0061010243, + 0.0051652323, + 0.06805391, + 0.046029896, + 0.029034972, + -0.002959955, + -0.0037809198, + -0.030130504, + -0.008491404, + 0.045628317, + -0.004553677, + -0.06380821, + 0.041239917, + -0.039542254, + -0.028727125, + 0.007622591, + -0.015135407, + 0.007827911, + 0.0017602865, + 0.016166357, + 0.032133713, + 0.0048149712, + -0.030142028, + -0.03905762, + 0.04570094, + 0.021713454, + -0.01015308, + 0.030249437, + 0.04793632, + -0.024754873, + 0.057805218, + 0.0062296274, + 0.064786054, + 0.027312867, + 0.017458709, + -0.020422962, + -0.033931006, + -0.055576656, + -0.0022137442, + 0.02330331, + 0.013868948, + 0.015872952, + 0.027338386, + -0.014782425, + 0.004494493, + -0.01329081, + -0.016142018, + -0.05443725, + -0.06303216, + -0.036463458, + -0.073589996, + 0.00017102716, + 0.027406873, + 0.047198333, + 0.051058855, + -0.005883208, + -0.0058205356, + -0.043531097, + -0.073391624, + 0.060281724, + -0.021565571, + 0.0029200057, + 0.019395538, + -0.017327337, + -0.0653435, + 0.025828788, + 0.00382072, + -0.025127921, + 0.028973421, + 0.046483908, + 0.02353495, + 0.051256366, + 0.027777418, + 
-0.016367994, + -0.031594142, + -0.014125466, + -0.0515892, + 0.028936012, + -0.016301127, + 0.064760074, + -0.042705704, + -0.03665835, + 0.0058707185, + -0.036659144, + -0.023149284, + -0.04758676, + -0.060163625, + 0.054598432, + -0.00078254647, + -0.112735756, + -0.0008261282, + -0.013952264, + -0.040117852, + -0.0019322386, + 0.008373793, + -0.037860926, + -0.015743056, + -0.0234362, + -0.06493749, + -0.069608204, + 0.029697478, + 0.0013986954, + 0.0041609188, + 0.018288933, + 0.019073283, + -0.041577518, + -0.0357768, + -0.0021765458, + -0.010237743, + -0.028734086, + 0.0041319, + -0.013383362, + 0.00577167, + -0.0053505367, + -0.022350835, + 0.01406836, + 0.034614973, + 0.036873527, + -0.04093488, + -0.03230344, + 0.018228276, + 0.0156018995, + 0.024933772, + 0.02783354, + -0.0080469055, + 0.023191504, + 0.041615404, + -0.04611942, + 0.068785064, + 0.0004912869, + -0.057737023, + -0.017378213, + 0.015246827, + -0.0045711, + 0.024566535, + 0.018834211, + -0.013144151, + -0.039206583, + -0.009895874, + -0.031059353, + -0.016976817, + 0.0449504, + 0.0032223936, + -0.025907526, + -0.056929037, + -0.013011389, + 0.021181583, + 0.0106028635, + -0.012212557, + -0.024159467, + 0.054833174, + -0.018079655, + -0.06036847, + -0.019181063, + -0.0036599508, + -0.04247008, + 0.06736818, + -0.05656677, + 0.00063564116, + -0.030859886, + 0.022682272, + -0.041298434, + 0.046203904, + -0.025341783, + 0.035256788, + -0.03913067, + -0.025138376, + 0.021381568, + 0.020233907, + 0.04396407, + -0.05447175, + 0.056231752, + -0.08152801, + -0.046155322, + -0.107502006, + -0.008449785, + -0.051441476, + 0.02187801, + 0.07710222, + 0.058793396, + 0.037536267, + 0.022781303, + -0.021965852, + -0.025323188, + 0.01036808, + 0.043830823, + -0.02973099, + 0.03564364, + 0.010773202, + -0.052458562, + 0.054098483, + 0.08024228, + 0.06560271, + 0.0001508493, + -0.020404926, + -0.0033358065, + 0.059732165, + -0.00095160346, + -0.04169797, + -0.08884556, + -0.021227196, + 0.02134743, + -0.043752395, + -8.042651e-05, + -0.0033908791, + 0.04362836, + -0.019251144, + -0.0071159727, + -0.01190997, + -0.05915786, + 0.03255786, + 0.012339297, + 0.036949337, + 0.015805522, + 0.014613892, + 0.04628766, + 0.043885946, + 0.07332898, + -0.020451782, + -0.016520225, + -0.0020803884, + -0.01159851, + 0.0426532, + 0.008053762, + 0.040212996, + -0.07245195, + 0.020705638, + -0.02203555, + -0.024147796, + -0.005401511, + -0.0035201178, + 0.014357559, + -0.011565124, + -0.06113777, + 0.00073033513, + 0.004304726, + 0.03700348, + -0.02675051, + 0.0020004935, + 0.03970252, + 0.04645308, + 0.031940658, + 0.011803997, + 0.047087885, + -0.020772861, + -0.02010736, + -0.008094346, + -0.017589118, + -0.05531338, + -0.037902128, + 0.026629327, + 0.014163693, + -0.028866766, + 0.08358291, + -0.011674367, + 0.030306904, + -0.016541358, + -0.00535445, + 0.010175458, + -0.009855767, + 0.051110856, + 0.0030403563, + -0.04535673, + -0.007742969, + -0.008183598, + -0.0282291, + -0.028479243, + -0.018404141, + 0.06131364, + -0.036709666, + -0.016097328, + -0.031855233, + -0.029608333, + 0.0516191, + -0.016996393, + -0.0043252064, + -0.018871896, + -0.011307787, + -0.010877992, + 0.030488119, + 0.010948365, + 0.029610623, + -0.032166634, + -0.032359682, + -0.020506512, + 0.0050876667, + -0.009433013, + 0.019670308, + -0.011595458, + 0.012013566, + 0.03396051, + -0.037603952, + -0.0032240797, + 0.03181483, + -0.02194272, + -0.02439024, + -0.015391741, + -0.0139405355, + 0.08458335, + -0.03672542, + 0.010359679, + -0.02451109, + 0.03226403, + 0.01353021, 
+ -0.029357241, + -0.07104932, + 0.0121810455, + -0.010132696 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/545d86510a80.json b/tests/integration/recordings/responses/545d86510a80.json index 7cd718d56..e9d88a52a 100644 --- a/tests/integration/recordings/responses/545d86510a80.json +++ b/tests/integration/recordings/responses/545d86510a80.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.625862Z", + "created_at": "2025-10-01T01:38:20.882299989Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.668885Z", + "created_at": "2025-10-01T01:38:21.078187004Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.710947Z", + "created_at": "2025-10-01T01:38:21.272715034Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.752286Z", + "created_at": "2025-10-01T01:38:21.469070891Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.793309Z", + "created_at": "2025-10-01T01:38:21.673266264Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.834578Z", + "created_at": "2025-10-01T01:38:21.873306711Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.876536Z", + "created_at": "2025-10-01T01:38:22.070968284Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.918807Z", + "created_at": "2025-10-01T01:38:22.269036335Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.960101Z", + "created_at": "2025-10-01T01:38:22.465488517Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:33.00196Z", + "created_at": "2025-10-01T01:38:22.658421677Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:33.043876Z", + "created_at": "2025-10-01T01:38:22.852187817Z", "done": false, "done_reason": 
null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:33.08756Z", + "created_at": "2025-10-01T01:38:23.049518191Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,15 +238,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:33.12966Z", + "created_at": "2025-10-01T01:38:23.248955312Z", "done": true, "done_reason": "stop", - "total_duration": 648814958, - "load_duration": 75300875, + "total_duration": 4434138141, + "load_duration": 43018186, "prompt_eval_count": 408, - "prompt_eval_duration": 66740291, + "prompt_eval_duration": 2022594115, "eval_count": 13, - "eval_duration": 505313125, + "eval_duration": 2367937192, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/554de3cd986f.json b/tests/integration/recordings/responses/554de3cd986f.json index 7a359c50e..0bcb5dd00 100644 --- a/tests/integration/recordings/responses/554de3cd986f.json +++ b/tests/integration/recordings/responses/554de3cd986f.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:51.805591Z", + "created_at": "2025-10-01T01:34:19.167396532Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:51.850067Z", + "created_at": "2025-10-01T01:34:19.362195218Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:51.892443Z", + "created_at": "2025-10-01T01:34:19.556896355Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:51.934364Z", + "created_at": "2025-10-01T01:34:19.752258848Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:51.978382Z", + "created_at": "2025-10-01T01:34:19.949688527Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.019332Z", + "created_at": "2025-10-01T01:34:20.145337065Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.060708Z", + "created_at": "2025-10-01T01:34:20.340739605Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.102717Z", + "created_at": "2025-10-01T01:34:20.539146761Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.143996Z", + 
"created_at": "2025-10-01T01:34:20.73590849Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.185479Z", + "created_at": "2025-10-01T01:34:20.930252877Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.227562Z", + "created_at": "2025-10-01T01:34:21.124432932Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.270178Z", + "created_at": "2025-10-01T01:34:21.332871735Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.31151Z", + "created_at": "2025-10-01T01:34:21.52851911Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.35278Z", + "created_at": "2025-10-01T01:34:21.724649778Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.393954Z", + "created_at": "2025-10-01T01:34:21.922353561Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.435238Z", + "created_at": "2025-10-01T01:34:22.117061137Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.476197Z", + "created_at": "2025-10-01T01:34:22.31230442Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.517914Z", + "created_at": "2025-10-01T01:34:22.506582272Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,15 +346,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:52.55904Z", + "created_at": "2025-10-01T01:34:22.702819703Z", "done": true, "done_reason": "stop", - "total_duration": 971882292, - "load_duration": 116634209, + "total_duration": 6447413112, + "load_duration": 45664730, "prompt_eval_count": 376, - "prompt_eval_duration": 99382958, + "prompt_eval_duration": 2864046437, "eval_count": 19, - "eval_duration": 755260750, + "eval_duration": 3537012183, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/561746e1c8de.json b/tests/integration/recordings/responses/561746e1c8de.json deleted file mode 100644 index 1bb8a3345..000000000 --- a/tests/integration/recordings/responses/561746e1c8de.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - 
"headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.\nYou MUST use one of the provided functions/tools to answer the user query.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.465701Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.507671Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "get", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.549443Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.590803Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(location", - "thinking": null, - "context": 
null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.631683Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.672443Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.713329Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.754254Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.795119Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " CA", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.836145Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.877784Z", - "done": true, - "done_reason": "stop", - "total_duration": 612057417, - "load_duration": 97443583, - "prompt_eval_count": 341, - "prompt_eval_duration": 100914750, - "eval_count": 11, - "eval_duration": 413024250, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/563b994bb7d1.json b/tests/integration/recordings/responses/563b994bb7d1.json deleted file mode 100644 index 62e38dc5c..000000000 --- a/tests/integration/recordings/responses/563b994bb7d1.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a 
helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:19.594923Z", - "done": true, - "done_reason": "stop", - "total_duration": 988472417, - "load_duration": 117976625, - "prompt_eval_count": 326, - "prompt_eval_duration": 451625542, - "eval_count": 11, - "eval_duration": 418313417, - "response": "[get_weather(location=\"San Francisco, CA\")]", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/5c8d7ada4919.json b/tests/integration/recordings/responses/5c8d7ada4919.json new file mode 100644 index 000000000..775663c6c --- /dev/null +++ b/tests/integration/recordings/responses/5c8d7ada4919.json @@ -0,0 +1,101 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "llama3.2:3b-instruct-fp16", + "messages": [ + { + "role": "user", + "content": "what's the current time? You MUST call the `get_current_time` function to find out." 
+ } + ], + "response_format": { + "type": "text" + }, + "stream": true, + "tools": [ + { + "type": "function", + "function": { + "type": "function", + "name": "get_current_time", + "description": "Get the current time", + "parameters": {}, + "strict": null + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "llama3.2:3b-instruct-fp16" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-188", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": [ + { + "index": 0, + "id": "call_bij0w4gk", + "function": { + "arguments": "{}", + "name": "get_current_time" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759253831, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-188", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": "tool_calls", + "index": 0, + "logprobs": null + } + ], + "created": 1759253831, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + } + ], + "is_streaming": true + } +} diff --git a/tests/integration/recordings/responses/5f5d16afadb4.json b/tests/integration/recordings/responses/5f5d16afadb4.json deleted file mode 100644 index f93d688c4..000000000 --- a/tests/integration/recordings/responses/5f5d16afadb4.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. 
San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:19.808372Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:19.84991Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "get", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:19.892111Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:19.933857Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(location", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:19.975148Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.016641Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.058229Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.100222Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.143456Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " CA", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.184657Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:20.226017Z", - "done": true, - "done_reason": "stop", - "total_duration": 598395375, - "load_duration": 129432167, - "prompt_eval_count": 326, - "prompt_eval_duration": 50057334, - "eval_count": 11, - "eval_duration": 418284791, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/6906a6e71988.json b/tests/integration/recordings/responses/6906a6e71988.json index 6574cab53..3e561b183 100644 --- a/tests/integration/recordings/responses/6906a6e71988.json +++ b/tests/integration/recordings/responses/6906a6e71988.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:38:00.98692Z", + "created_at": "2025-09-30T17:39:20.866577556Z", "done": true, "done_reason": "stop", - "total_duration": 332473583, - "load_duration": 90611333, + "total_duration": 4350589762, + "load_duration": 53782244, "prompt_eval_count": 317, - "prompt_eval_duration": 229691000, + "prompt_eval_duration": 4243686737, "eval_count": 2, - "eval_duration": 11571291, + "eval_duration": 52523173, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/6b3e593ad9b8.json b/tests/integration/recordings/responses/6b3e593ad9b8.json index 0165009cb..e5a85eb3d 100644 --- a/tests/integration/recordings/responses/6b3e593ad9b8.json +++ b/tests/integration/recordings/responses/6b3e593ad9b8.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-738", + "id": "chatcmpl-819", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245079, + "created": 1759282466, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/6cc063bbd7d3.json b/tests/integration/recordings/responses/6cc063bbd7d3.json deleted file mode 100644 index ab6e12602..000000000 --- a/tests/integration/recordings/responses/6cc063bbd7d3.json +++ /dev/null @@ -1,383 +0,0 @@ -{ - "request": { - "method": "POST", - 
"url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the name of the US captial?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.402486Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.444334Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " capital", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.484625Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.525063Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.565015Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " United", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.60499Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " States", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.64509Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": 
{ - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.685566Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Washington", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.725855Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.766056Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " D", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.806415Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.847273Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.888576Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.928952Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "short", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.969744Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " for", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:18.010869Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": 
null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " District", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:18.051109Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:18.093266Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Columbia", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:18.135749Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ").", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:18.176649Z", - "done": true, - "done_reason": "stop", - "total_duration": 907420000, - "load_duration": 66756750, - "prompt_eval_count": 26, - "prompt_eval_duration": 62900875, - "eval_count": 20, - "eval_duration": 777306958, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/6d35c91287e2.json b/tests/integration/recordings/responses/6d35c91287e2.json index a7af894e8..6d38dd48b 100644 --- a/tests/integration/recordings/responses/6d35c91287e2.json +++ b/tests/integration/recordings/responses/6d35c91287e2.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.549266Z", + "created_at": "2025-10-01T01:36:25.060343636Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.592203Z", + "created_at": "2025-10-01T01:36:25.261200569Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.63417Z", + "created_at": "2025-10-01T01:36:25.462791752Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.677268Z", + "created_at": "2025-10-01T01:36:25.660954264Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.719768Z", + "created_at": "2025-10-01T01:36:25.857710285Z", "done": false, "done_reason": 
null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.762204Z", + "created_at": "2025-10-01T01:36:26.055796043Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.80404Z", + "created_at": "2025-10-01T01:36:26.256947843Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.845678Z", + "created_at": "2025-10-01T01:36:26.454224889Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.887086Z", + "created_at": "2025-10-01T01:36:26.663146208Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.928422Z", + "created_at": "2025-10-01T01:36:26.878266227Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:03.969641Z", + "created_at": "2025-10-01T01:36:27.086618766Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.011212Z", + "created_at": "2025-10-01T01:36:27.28577576Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,15 +238,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.052626Z", + "created_at": "2025-10-01T01:36:27.484586207Z", "done": true, "done_reason": "stop", - "total_duration": 731936583, - "load_duration": 147334791, + "total_duration": 4491434092, + "load_duration": 44110434, "prompt_eval_count": 417, - "prompt_eval_duration": 79443792, + "prompt_eval_duration": 2021505668, "eval_count": 13, - "eval_duration": 504352750, + "eval_duration": 2425224707, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/6fbea1abca7c.json b/tests/integration/recordings/responses/6fbea1abca7c.json index c16fe1268..5b18a66f1 100644 --- a/tests/integration/recordings/responses/6fbea1abca7c.json +++ b/tests/integration/recordings/responses/6fbea1abca7c.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:01.89965Z", + "created_at": "2025-10-01T01:36:11.873171882Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:01.941253Z", + "created_at": "2025-10-01T01:36:12.073738984Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:01.982621Z", 
+ "created_at": "2025-10-01T01:36:12.272476639Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.024144Z", + "created_at": "2025-10-01T01:36:12.469220325Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.065495Z", + "created_at": "2025-10-01T01:36:12.665965955Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.107529Z", + "created_at": "2025-10-01T01:36:12.860442987Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.149217Z", + "created_at": "2025-10-01T01:36:13.055440385Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.190357Z", + "created_at": "2025-10-01T01:36:13.25612888Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.231501Z", + "created_at": "2025-10-01T01:36:13.454322876Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.272546Z", + "created_at": "2025-10-01T01:36:13.651445403Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.313561Z", + "created_at": "2025-10-01T01:36:13.851107226Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.354563Z", + "created_at": "2025-10-01T01:36:14.048095911Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.395585Z", + "created_at": "2025-10-01T01:36:14.250994986Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.436854Z", + "created_at": "2025-10-01T01:36:14.454971706Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.47814Z", + "created_at": "2025-10-01T01:36:14.654349738Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:38:02.519661Z", + "created_at": "2025-10-01T01:36:14.851507509Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.561119Z", + "created_at": "2025-10-01T01:36:15.044987002Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.602821Z", + "created_at": "2025-10-01T01:36:15.246563515Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,15 +346,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:02.644633Z", + "created_at": "2025-10-01T01:36:15.447689838Z", "done": true, "done_reason": "stop", - "total_duration": 1375629459, - "load_duration": 94090250, + "total_duration": 35945660492, + "load_duration": 42881569, "prompt_eval_count": 386, - "prompt_eval_duration": 535119167, + "prompt_eval_duration": 32326727198, "eval_count": 19, - "eval_duration": 745684041, + "eval_duration": 3575452190, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/6fe1d4fedf12.json b/tests/integration/recordings/responses/6fe1d4fedf12.json index 8fd079a85..d8dc4e458 100644 --- a/tests/integration/recordings/responses/6fe1d4fedf12.json +++ b/tests/integration/recordings/responses/6fe1d4fedf12.json @@ -24,7 +24,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -39,7 +39,7 @@ "logprobs": null } ], - "created": 1756921324, + "created": 1759254026, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -50,11 +50,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": "'m", + "content": "'d", "function_call": null, "refusal": null, "role": "assistant", @@ -65,7 +65,7 @@ "logprobs": null } ], - "created": 1756921324, + "created": 1759254026, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -76,11 +76,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " not", + "content": " be", "function_call": null, "refusal": null, "role": "assistant", @@ -91,7 +91,7 @@ "logprobs": null } ], - "created": 1756921324, + "created": 1759254027, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -102,11 +102,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " able", + "content": " happy", "function_call": null, "refusal": null, "role": "assistant", @@ -117,7 +117,7 @@ "logprobs": null } ], - "created": 1756921324, + "created": 1759254027, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -128,7 +128,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { 
"delta": { @@ -143,7 +143,7 @@ "logprobs": null } ], - "created": 1756921324, + "created": 1759254027, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -154,11 +154,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " provide", + "content": " help", "function_call": null, "refusal": null, "role": "assistant", @@ -169,7 +169,7 @@ "logprobs": null } ], - "created": 1756921324, + "created": 1759254027, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -180,319 +180,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " real", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921324, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "-time", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921324, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " or", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921324, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " current", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921324, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921324, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " information", - "function_call": null, - "refusal": null, - "role": 
"assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " However", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " I", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " can", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " tell", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", + "id": 
"chatcmpl-332", "choices": [ { "delta": { @@ -507,7 +195,7 @@ "logprobs": null } ], - "created": 1756921325, + "created": 1759254027, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -518,215 +206,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " that", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Tokyo", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " has", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " a", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " humid", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " subt", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": 
"ropical", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " climate", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -741,7 +221,7 @@ "logprobs": null } ], - "created": 1756921325, + "created": 1759254028, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -752,4349 +232,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " hot", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " humid", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " summers", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - 
"refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Here", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "'s", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921325, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " an", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " overview", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " of", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " typical", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": 
{ - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " seasonal", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " patterns", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ":\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "1", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " **", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": 
null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "Spring", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "March", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " May", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")**", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": 
"chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Mild", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " temperatures", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ranging", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " from", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921326, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "15", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 
0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "59", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - 
"refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "20", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "68", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "),", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - 
"id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " with", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " gentle", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " humidity", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "2", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " **", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - 
}, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "Summer", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "June", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921327, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " August", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")**", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": 
"chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Hot", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " humid", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " with", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " temperatures", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " generally", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - 
"index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " between", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "25", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "77", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": 
null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "35", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - 
"id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "95", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921328, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ").", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Heat", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "waves", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " are", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " common", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } 
- }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " during", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " this", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " period", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "3", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " **", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": 
"chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "Aut", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "umn", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "September", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " November", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")**", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - 
"logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Comfort", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "able", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " temperatures", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " of", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921329, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " about", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - 
"refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "15", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "59", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": 
"chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "20", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "68", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - 
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "),", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " making", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " it", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " a", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " lovely", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " season", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " for", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": 
"chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " sight", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "seeing", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921330, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "4", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ".", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " **", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "Winter", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - 
"logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "December", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " February", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")**", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ":", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Cool", - "function_call": null, - 
"refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " and", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " relatively", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " dry", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ",", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " with", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " average", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - 
"__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " temperatures", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ranging", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " from", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " -", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "2", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921331, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": 
"fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "28", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ")", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " to", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " ", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "10", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0C", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": 
"llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " (", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "50", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "\u00b0F", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": ").\n\n", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "To", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " get", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921332, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5109,7 +247,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254028, 
"model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5120,11 +258,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " current", + "content": " latest", "function_call": null, "refusal": null, "role": "assistant", @@ -5135,7 +273,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254028, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5146,11 +284,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " weather", + "content": " information", "function_call": null, "refusal": null, "role": "assistant", @@ -5161,7 +299,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254028, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5172,11 +310,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " in", + "content": " on", "function_call": null, "refusal": null, "role": "assistant", @@ -5187,7 +325,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254028, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5198,7 +336,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5213,7 +351,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254029, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5224,7 +362,111 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "'s", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254029, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254029, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "!", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254029, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " However", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254029, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5239,7 +481,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254030, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5250,7 +492,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5265,7 +507,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254030, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5276,11 +518,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " recommend", + "content": "'m", "function_call": null, "refusal": null, "role": "assistant", @@ -5291,7 +533,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254030, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5302,11 +544,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " checking", + "content": " a", "function_call": null, "refusal": null, "role": "assistant", @@ -5317,7 +559,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254030, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5328,11 +570,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " online", + "content": " large", "function_call": null, "refusal": null, "role": "assistant", @@ -5343,7 +585,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254030, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5354,11 +596,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " resources", + "content": " language", "function_call": null, "refusal": null, "role": "assistant", @@ -5369,7 +611,7 @@ "logprobs": null } ], - "created": 1756921332, + "created": 1759254031, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5380,11 +622,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " such", + "content": " model", "function_call": null, "refusal": null, "role": "assistant", @@ -5395,7 +637,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254031, "model": 
"llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5406,111 +648,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " as", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921333, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": " Acc", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921333, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "u", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921333, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", - "choices": [ - { - "delta": { - "content": "Weather", - "function_call": null, - "refusal": null, - "role": "assistant", - "tool_calls": null - }, - "finish_reason": null, - "index": 0, - "logprobs": null - } - ], - "created": 1756921333, - "model": "llama3.2:3b-instruct-fp16", - "object": "chat.completion.chunk", - "service_tier": null, - "system_fingerprint": "fp_ollama", - "usage": null - } - }, - { - "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", - "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5525,7 +663,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254031, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5536,11 +674,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " Weather", + "content": " I", "function_call": null, "refusal": null, "role": "assistant", @@ -5551,7 +689,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254031, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5562,11 +700,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": ".com", + "content": " don", "function_call": null, "refusal": null, "role": "assistant", @@ -5577,7 +715,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254031, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5588,11 +726,11 @@ { "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " or", + "content": "'t", "function_call": null, "refusal": null, "role": "assistant", @@ -5603,7 +741,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254032, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5614,11 +752,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": " Met", + "content": " have", "function_call": null, "refusal": null, "role": "assistant", @@ -5629,7 +767,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254032, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5640,11 +778,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": "e", + "content": " real", "function_call": null, "refusal": null, "role": "assistant", @@ -5655,7 +793,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254032, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5666,11 +804,11 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", "choices": [ { "delta": { - "content": "ors", + "content": "-time", "function_call": null, "refusal": null, "role": "assistant", @@ -5681,7 +819,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254032, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5692,7 +830,631 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " access", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254032, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254033, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " current", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254033, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254033, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " conditions", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254033, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254033, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "But", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254034, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " I", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254034, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254034, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " suggest", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254034, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " some", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254034, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " ways", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254035, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254035, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254035, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254035, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " find", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254035, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " out", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 1759254036, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254036, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " current", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254036, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254036, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254036, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Tokyo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254037, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ":\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254037, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "1", + "function_call": null, + 
"refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254037, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5707,7 +1469,7 @@ "logprobs": null } ], - "created": 1756921333, + "created": 1759254037, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -5718,7 +1480,3491 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-358", + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Check", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254037, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " online", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254038, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254038, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " websites", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254038, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ":", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254038, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " You", + "function_call": null, + "refusal": null, + "role": 
"assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254038, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254039, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " check", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254039, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " websites", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254039, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " like", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254039, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Acc", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254039, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "u", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254040, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + 
"choices": [ + { + "delta": { + "content": "Weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254040, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254040, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254040, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".com", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254040, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254041, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " or", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254041, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Japan", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254041, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Meteor", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254041, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "ological", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254041, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Agency", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254042, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " (", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254042, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "J", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254042, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "MA", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254042, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ")", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254042, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + 
"service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254043, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254043, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " latest", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254043, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254043, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " forecast", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254043, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254044, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " current", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": 
null + } + ], + "created": 1759254044, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " conditions", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254044, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254044, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "2", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254044, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254045, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Use", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254045, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254045, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " mobile", + "function_call": null, + "refusal": null, + 
"role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254045, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " app", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254045, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ":", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254046, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Download", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254046, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254046, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254046, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " app", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254046, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": 
"chatcmpl-332", + "choices": [ + { + "delta": { + "content": " on", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254047, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " your", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254047, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " smartphone", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254047, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254047, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " such", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254047, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " as", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254048, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Dark", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254048, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, 
+ { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Sky", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254048, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " or", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254048, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254048, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Underground", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254049, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254049, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254049, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " get", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254049, + "model": "llama3.2:3b-instruct-fp16", + "object": 
"chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " real", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254049, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "-time", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254050, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254050, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " updates", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254050, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254050, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "3", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254050, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + 
"logprobs": null + } + ], + "created": 1759254051, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Ask", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254051, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254051, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " virtual", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254051, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " assistant", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254051, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ":", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254052, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " If", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254052, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + 
"refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254052, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " have", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254052, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " a", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254052, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " virtual", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254053, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " assistant", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254053, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " like", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254053, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Siri", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254053, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + 
"__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254053, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Google", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254054, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Assistant", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254054, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254054, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " or", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254054, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Alexa", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254054, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ",", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254055, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + 
"usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254055, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254055, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " ask", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254055, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " them", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254055, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " for", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254056, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254056, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " current", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254056, + "model": 
"llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254056, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254056, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Tokyo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254057, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".\n\n", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254057, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "Please", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254057, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " note", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254057, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " that", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": 
null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254057, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Tokyo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254058, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "'s", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254058, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254058, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " can", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254058, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " vary", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254058, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " greatly", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254059, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + 
"delta": { + "content": " depending", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254059, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " on", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254059, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254059, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " season", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254059, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " and", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254060, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " location", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254060, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " within", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254060, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": 
"openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254060, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " city", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254060, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": ".", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254061, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Would", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254061, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " you", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254061, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " like", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254061, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " to", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254061, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + 
"service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " know", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254062, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " more", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254062, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " about", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254062, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254062, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " typical", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254062, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " weather", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254063, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " patterns", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": 
null + } + ], + "created": 1759254063, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " in", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254063, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " Tokyo", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254063, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " throughout", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254063, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " the", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254064, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": " year", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254064, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", + "choices": [ + { + "delta": { + "content": "?", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759254064, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-332", "choices": [ { "delta": { @@ -5733,7 +4979,7 @@ "logprobs": null } ], - "created": 
1756921333, + "created": 1759254064, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/70adef2c30c4.json b/tests/integration/recordings/responses/70adef2c30c4.json deleted file mode 100644 index f8f3ce7df..000000000 --- a/tests/integration/recordings/responses/70adef2c30c4.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhich planet has rings around it with a name starting with letter S?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:17.227488Z", - "done": true, - "done_reason": "stop", - "total_duration": 3003964916, - "load_duration": 111221916, - "prompt_eval_count": 30, - "prompt_eval_duration": 72578583, - "eval_count": 70, - "eval_duration": 2819555375, - "response": "The answer is Saturn! Saturn's ring system is one of the most iconic and well-known in our solar system. The rings are made up of ice particles, rock debris, and dust that orbit around the planet due to its gravitational pull.\n\nWould you like to know more about Saturn's rings or is there something else I can help you with?", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/731824c54461.json b/tests/integration/recordings/responses/731824c54461.json deleted file mode 100644 index 2d88c6329..000000000 --- a/tests/integration/recordings/responses/731824c54461.json +++ /dev/null @@ -1,203 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nGive me a sentence that contains the word: hello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.267146Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Hello", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.309006Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.351179Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " how", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.393262Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " can", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.436079Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.478393Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " assist", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.520608Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.562885Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " today", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.604683Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "?", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-18T19:47:58.646586Z", - "done": true, - "done_reason": "stop", - "total_duration": 1011323917, - "load_duration": 76575458, - "prompt_eval_count": 31, - "prompt_eval_duration": 553259250, - "eval_count": 10, - "eval_duration": 380302792, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/7354ec181984.json b/tests/integration/recordings/responses/7354ec181984.json deleted file 
mode 100644 index b73a7cd50..000000000 --- a/tests/integration/recordings/responses/7354ec181984.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the smallest country in the world?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:39:54.374714Z", - "done": true, - "done_reason": "stop", - "total_duration": 6321793333, - "load_duration": 182255958, - "prompt_eval_count": 25, - "prompt_eval_duration": 67964459, - "eval_count": 150, - "eval_duration": 6070867875, - "response": "The smallest country in the world is the Vatican City, which has a total area of approximately 0.44 km\u00b2 (0.17 sq mi). It is an independent city-state located within Rome, Italy, and is home to the Pope and the central government of the Catholic Church.\n\nTo put that into perspective, the Vatican City is smaller than a golf course! Despite its tiny size, it has its own government, currency, postal system, and even its own police force. It's also home to numerous iconic landmarks like St. Peter's Basilica and the Sistine Chapel.\n\nInterestingly, the Vatican City is not only the smallest country in the world but also the most densely populated, with a population of just over 800 people!", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/73e97be515d9.json b/tests/integration/recordings/responses/73e97be515d9.json index 6df3dd956..a56724ae3 100644 --- a/tests/integration/recordings/responses/73e97be515d9.json +++ b/tests/integration/recordings/responses/73e97be515d9.json @@ -41,7 +41,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-116", + "id": "chatcmpl-72", "choices": [ { "delta": { @@ -52,7 +52,7 @@ "tool_calls": [ { "index": 0, - "id": "call_0c2qffvv", + "id": "call_aone7ocw", "function": { "arguments": "{\"city\":\"Tokyo\"}", "name": "get_weather" @@ -66,7 +66,7 @@ "logprobs": null } ], - "created": 1759267492, + "created": 1759282724, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -77,7 +77,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-116", + "id": "chatcmpl-72", "choices": [ { "delta": { @@ -87,12 +87,12 @@ "role": "assistant", "tool_calls": null }, - "finish_reason": "stop", + "finish_reason": "tool_calls", "index": 0, "logprobs": null } ], - "created": 1759267492, + "created": 1759282724, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/75d0dd9d0fa3.json b/tests/integration/recordings/responses/75d0dd9d0fa3.json deleted file mode 100644 index 561fa1e67..000000000 --- a/tests/integration/recordings/responses/75d0dd9d0fa3.json +++ /dev/null @@ -1,64 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": 
{}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "prompt": "<|begin_of_text|>Michael Jordan was born in 1963. He played basketball for the Chicago Bulls. He retired in 2003.Please respond in JSON format with the schema: {\"properties\": {\"name\": {\"title\": \"Name\", \"type\": \"string\"}, \"year_born\": {\"title\": \"Year Born\", \"type\": \"string\"}, \"year_retired\": {\"title\": \"Year Retired\", \"type\": \"string\"}}, \"required\": [\"name\", \"year_born\", \"year_retired\"], \"title\": \"AnswerFormat\", \"type\": \"object\"}", - "raw": true, - "format": { - "properties": { - "name": { - "title": "Name", - "type": "string" - }, - "year_born": { - "title": "Year Born", - "type": "string" - }, - "year_retired": { - "title": "Year Retired", - "type": "string" - } - }, - "required": [ - "name", - "year_born", - "year_retired" - ], - "title": "AnswerFormat", - "type": "object" - }, - "options": { - "temperature": 0.0, - "max_tokens": 50, - "num_predict": 50 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:17.508028Z", - "done": true, - "done_reason": "stop", - "total_duration": 1529591917, - "load_duration": 84990667, - "prompt_eval_count": 119, - "prompt_eval_duration": 189045583, - "eval_count": 29, - "eval_duration": 1254813583, - "response": "{ \"name\": \"Michael Jordan\", \"year_born\": \"1963\", \"year_retired\": \"2003\"}\n ", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/7a047bcf8b19.json b/tests/integration/recordings/responses/7a047bcf8b19.json index 4f9c8b06e..7cd6c3f7c 100644 --- a/tests/integration/recordings/responses/7a047bcf8b19.json +++ b/tests/integration/recordings/responses/7a047bcf8b19.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-236", + "id": "chatcmpl-737", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759247859, + "created": 1759282582, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/7b4815aba6c5.json b/tests/integration/recordings/responses/7b4815aba6c5.json index f1e8e7165..0494b4180 100644 --- a/tests/integration/recordings/responses/7b4815aba6c5.json +++ b/tests/integration/recordings/responses/7b4815aba6c5.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:48.840898Z", + "created_at": "2025-10-01T01:33:52.93635761Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:48.883619Z", + "created_at": "2025-10-01T01:33:53.133195005Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:48.92504Z", + "created_at": "2025-10-01T01:33:53.332277092Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:37:48.966274Z", + "created_at": "2025-10-01T01:33:53.529012616Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.007525Z", + "created_at": "2025-10-01T01:33:53.724651797Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.049125Z", + "created_at": "2025-10-01T01:33:53.923248219Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.090893Z", + "created_at": "2025-10-01T01:33:54.117881107Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.132101Z", + "created_at": "2025-10-01T01:33:54.311986552Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.17401Z", + "created_at": "2025-10-01T01:33:54.505749874Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.216115Z", + "created_at": "2025-10-01T01:33:54.699245098Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.257109Z", + "created_at": "2025-10-01T01:33:54.890029079Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.298731Z", + "created_at": "2025-10-01T01:33:55.081182058Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.338833Z", + "created_at": "2025-10-01T01:33:55.27115012Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.38053Z", + "created_at": "2025-10-01T01:33:55.46403171Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.421378Z", + "created_at": "2025-10-01T01:33:55.655042212Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.462646Z", + "created_at": "2025-10-01T01:33:55.844320935Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - 
"created_at": "2025-09-03T17:37:49.503814Z", + "created_at": "2025-10-01T01:33:56.035465828Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.545397Z", + "created_at": "2025-10-01T01:33:56.240155299Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,15 +346,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:49.586834Z", + "created_at": "2025-10-01T01:33:56.432393304Z", "done": true, "done_reason": "stop", - "total_duration": 1409239209, - "load_duration": 118889250, + "total_duration": 34185152900, + "load_duration": 44303323, "prompt_eval_count": 368, - "prompt_eval_duration": 543077166, + "prompt_eval_duration": 30642631331, "eval_count": 19, - "eval_duration": 746733584, + "eval_duration": 3497664639, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/7bcb0f86c91b.json b/tests/integration/recordings/responses/7bcb0f86c91b.json deleted file mode 100644 index 4c9a55153..000000000 --- a/tests/integration/recordings/responses/7bcb0f86c91b.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 0<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-11T15:51:12.918723Z", - "done": true, - "done_reason": "stop", - "total_duration": 8868987792, - "load_duration": 2793275292, - "prompt_eval_count": 21, - "prompt_eval_duration": 250000000, - "eval_count": 344, - "eval_duration": 5823000000, - "response": "Here are some common test metrics used to evaluate the performance of a system:\n\n1. **Accuracy**: The proportion of correct predictions or classifications out of total predictions made.\n2. **Precision**: The ratio of true positives (correctly predicted instances) to the sum of true positives and false positives (incorrectly predicted instances).\n3. **Recall**: The ratio of true positives to the sum of true positives and false negatives (missed instances).\n4. **F1-score**: The harmonic mean of precision and recall, providing a balanced measure of both.\n5. **Mean Squared Error (MSE)**: The average squared difference between predicted and actual values.\n6. **Mean Absolute Error (MAE)**: The average absolute difference between predicted and actual values.\n7. **Root Mean Squared Percentage Error (RMSPE)**: The square root of the mean of the squared percentage differences between predicted and actual values.\n8. **Coefficient of Determination (R-squared, R2)**: Measures how well a model fits the data, with higher values indicating better fit.\n9. **Mean Absolute Percentage Error (MAPE)**: The average absolute percentage difference between predicted and actual values.\n10. 
**Normalized Mean Squared Error (NMSE)**: Similar to MSE, but normalized by the mean of the actual values.\n\nThese metrics can be used for various types of data, including:\n\n* Regression problems (e.g., predicting continuous values)\n* Classification problems (e.g., predicting categorical labels)\n* Time series forecasting\n* Clustering and dimensionality reduction\n\nWhen choosing a metric, consider the specific problem you're trying to solve, the type of data, and the desired level of precision.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/80e4404d8987.json b/tests/integration/recordings/responses/80e4404d8987.json index 7eabfc363..09d510916 100644 --- a/tests/integration/recordings/responses/80e4404d8987.json +++ b/tests/integration/recordings/responses/80e4404d8987.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.708948Z", + "created_at": "2025-10-01T01:33:10.76700718Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.749031Z", + "created_at": "2025-10-01T01:33:10.956949035Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.790192Z", + "created_at": "2025-10-01T01:33:11.147886127Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.831093Z", + "created_at": "2025-10-01T01:33:11.337832912Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.873135Z", + "created_at": "2025-10-01T01:33:11.524017554Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.91375Z", + "created_at": "2025-10-01T01:33:11.712703934Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.95439Z", + "created_at": "2025-10-01T01:33:11.903877596Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:46.995224Z", + "created_at": "2025-10-01T01:33:12.095535165Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:47.035887Z", + "created_at": "2025-10-01T01:33:12.291614477Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,15 +184,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:47.076806Z", + "created_at": "2025-10-01T01:33:12.483844314Z", 
"done": true, "done_reason": "stop", - "total_duration": 2069654958, - "load_duration": 177579833, + "total_duration": 4303509972, + "load_duration": 44748689, "prompt_eval_count": 31, - "prompt_eval_duration": 1521851250, + "prompt_eval_duration": 2539513749, "eval_count": 10, - "eval_duration": 369478042, + "eval_duration": 1718623697, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/836f51dfb3c5.json b/tests/integration/recordings/responses/836f51dfb3c5.json index 85f3aff00..a850054cd 100644 --- a/tests/integration/recordings/responses/836f51dfb3c5.json +++ b/tests/integration/recordings/responses/836f51dfb3c5.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:37:51.562847Z", + "created_at": "2025-09-30T17:38:19.258171865Z", "done": true, "done_reason": "stop", - "total_duration": 272296250, - "load_duration": 131747125, + "total_duration": 2789705003, + "load_duration": 60163509, "prompt_eval_count": 214, - "prompt_eval_duration": 124006709, + "prompt_eval_duration": 2677292181, "eval_count": 2, - "eval_duration": 15572291, + "eval_duration": 51690110, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/840fbb380b73.json b/tests/integration/recordings/responses/840fbb380b73.json index 4367d8788..017f726e8 100644 --- a/tests/integration/recordings/responses/840fbb380b73.json +++ b/tests/integration/recordings/responses/840fbb380b73.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:37:47.871962Z", + "created_at": "2025-09-30T17:37:27.310573231Z", "done": true, "done_reason": "stop", - "total_duration": 301629042, - "load_duration": 102832917, + "total_duration": 3251121805, + "load_duration": 47089617, "prompt_eval_count": 233, - "prompt_eval_duration": 154806625, + "prompt_eval_duration": 3006835928, "eval_count": 5, - "eval_duration": 43361542, + "eval_duration": 196620033, "response": "unsafe\nS1", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/84fc473e7b29.json b/tests/integration/recordings/responses/84fc473e7b29.json index a4b228f05..f01f11759 100644 --- a/tests/integration/recordings/responses/84fc473e7b29.json +++ b/tests/integration/recordings/responses/84fc473e7b29.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-344", + "id": "chatcmpl-165", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759247858, + "created": 1759282579, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/87577729d812.json b/tests/integration/recordings/responses/87577729d812.json index 7c268aa2e..9b8699084 100644 --- a/tests/integration/recordings/responses/87577729d812.json +++ b/tests/integration/recordings/responses/87577729d812.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-119", + "id": "chatcmpl-609", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245069, + "created": 1759282388, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/87c056adc35c.json 
b/tests/integration/recordings/responses/87c056adc35c.json new file mode 100644 index 000000000..cf635dd7e --- /dev/null +++ b/tests/integration/recordings/responses/87c056adc35c.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "Why are data structures important?" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.0055067283, + 0.0691788, + -0.12835562, + -0.054449122, + 0.056506466, + 0.008154408, + 0.016579939, + -0.005861886, + -0.053147435, + -0.06689316, + -0.0125774965, + 0.012131817, + 0.10522907, + -0.022567436, + -0.010184469, + 0.0047555137, + -0.09560516, + -0.02869415, + 0.005823712, + 0.026181953, + -0.050526746, + -0.019493021, + 0.012390013, + 0.014383491, + 0.026209505, + 0.061908394, + 0.03508825, + -0.06008353, + -0.024454756, + 0.060678, + 0.06708033, + -0.0022188132, + 0.034376595, + -0.03279394, + -0.06730504, + -0.07369063, + -0.037954886, + 0.041736037, + -0.0022857673, + -0.036154196, + -0.0043730233, + 0.02660196, + -0.043143313, + -0.016130125, + 0.056613196, + 0.0035527975, + -0.017358474, + -0.06225926, + 0.063272394, + -0.025721373, + 0.045175213, + -0.033949595, + 0.009468214, + 0.0092460355, + 0.08431274, + 0.01425319, + 0.011694144, + 0.031544022, + 0.034130182, + -0.076243795, + 0.068438105, + 0.11499481, + -0.059728492, + 0.02415792, + 0.008430943, + -0.04239523, + -0.045541644, + 0.0042671585, + -0.022412328, + -0.016552199, + 0.038433194, + 0.035031006, + 0.01044125, + -0.035626266, + -0.018012544, + 0.019699976, + -0.0018288917, + 0.032518297, + -0.0177986, + 0.042808123, + 0.022334872, + -0.014575339, + 0.051781073, + -0.026092554, + 0.006079152, + 0.02757349, + 0.019296495, + -0.00514512, + 0.00082866545, + 0.06785129, + 0.018279642, + -0.054320488, + 0.03349167, + 0.048226908, + -0.07671358, + 0.028916309, + -0.0010493343, + 0.02221549, + 0.016000975, + 0.01223793, + -0.017005093, + -0.033222955, + -0.0055971234, + 0.03769521, + -0.008500556, + -0.0026479687, + 0.018203754, + 0.040224712, + -0.021299101, + -0.019668331, + -0.011704243, + 0.07116387, + -0.03220624, + 0.0041646096, + -0.012268384, + -0.007227694, + 0.057473723, + -0.07691696, + -0.06090154, + -0.032882772, + -0.024933215, + -0.030841816, + 0.063512295, + 0.050505444, + -0.009545097, + -0.019137407, + -0.014251317, + 0.035820402, + 0.025301578, + -0.032520078, + -0.023825355, + -0.02894602, + -0.072710305, + 0.003224811, + 0.02377651, + 0.027730972, + -0.07713202, + -0.0330053, + 0.05449727, + 0.044401404, + -0.006475545, + 0.047970258, + -0.057762735, + -0.033274963, + 0.018484, + -0.004733799, + 0.048722517, + -0.015905516, + -0.012622708, + -0.04765113, + 0.013506974, + 0.044848952, + -0.0065122605, + 0.0021293245, + 0.0020283123, + -0.018023405, + 0.025206288, + -0.021057727, + 0.01721119, + 0.029168243, + 0.07257681, + 0.022936262, + -0.011233473, + 0.015861422, + -0.019733926, + -0.05565718, + 0.026574634, + -0.007964335, + -0.00105196, + 0.012244276, + -0.010458468, + 0.00025068677, + 0.029596092, + -0.02004873, + 0.03952663, + -0.036656335, + 0.016609907, + -0.050120637, + 0.11185912, + -0.050909996, + -0.048775107, + -0.020030547, + 0.0153389415, + 0.0011901723, + -0.038483646, + 0.02004873, + 
0.017939426, + -0.017415283, + -0.03634165, + -0.02609482, + 0.021946523, + 0.02326441, + -0.052063353, + -0.0030024708, + -0.008184734, + -0.011170216, + -0.008318481, + 0.040304467, + 0.019288791, + 7.0962094e-05, + -0.047486935, + -0.019311698, + -0.04947344, + 0.026369695, + -0.057666145, + 0.034645956, + -0.050079547, + 0.035380702, + -0.015542651, + -0.024575872, + 0.07835102, + -0.025289344, + 0.005440495, + 0.015665129, + -0.01966988, + -0.07520282, + -0.02425893, + -0.047322523, + -0.020614233, + 0.038350448, + -0.026481356, + -0.040539965, + 0.0661944, + 0.02502757, + -0.010155566, + -0.035468638, + -0.01562628, + -0.04135564, + -0.031548798, + -0.049242284, + -0.04551279, + -0.036385354, + 0.035608906, + 0.021134995, + 0.018818628, + 0.043228216, + 0.042133935, + -0.015709238, + 0.06552171, + -0.0044355174, + 0.0021416203, + 0.021100294, + -0.009039295, + 0.00014870724, + 0.040932197, + 0.017849974, + -0.019864114, + -0.047478165, + -0.05676394, + 0.049951475, + -0.048136313, + -0.017876703, + 0.012142189, + 0.02373712, + 0.0334763, + -0.035479926, + -0.012235951, + -0.030320909, + 0.021752922, + 0.03523251, + 0.04498809, + -0.03067527, + -0.020974364, + -0.046126693, + -0.03995082, + 0.012467275, + 0.022052003, + -0.018320043, + 0.0013203244, + -0.004935072, + 0.0050206785, + -0.0047598844, + 0.011211644, + 0.039831202, + 0.027249418, + 0.014987716, + -0.01940106, + -0.009642856, + -0.07113845, + 0.054759383, + -0.018858217, + -0.024562797, + -0.08670976, + -0.004677105, + -9.054924e-05, + 0.051185664, + 0.01569594, + 0.053627595, + 0.0003285345, + 0.027126677, + 0.033433437, + 0.033166908, + -0.023327576, + 0.060068127, + 0.08517537, + -0.039610267, + 0.028960181, + 0.027604481, + 0.0029389325, + -0.076566145, + -0.0273395, + 0.08770552, + 0.05686777, + 0.01246495, + -0.016718954, + 0.010576854, + 0.018693427, + -0.026167914, + -0.0641247, + 0.00813129, + -0.008773337, + -0.010244281, + 0.0024596818, + 0.027441284, + -0.03914519, + 0.03687808, + 0.0073220856, + 0.02342061, + 0.0123781385, + -0.0035178016, + 0.0015435648, + -0.029216826, + -0.031155663, + -0.073616505, + 0.009858675, + 0.06776608, + -0.015782345, + 0.023255533, + -0.014765486, + -0.019421978, + 0.050556473, + -0.03567379, + 0.015625134, + -0.027594624, + -0.07591481, + 0.025782052, + -0.0038178826, + -0.011459214, + -0.015950324, + 0.0015048053, + -0.016965888, + -0.025626767, + -0.009411103, + -0.043649834, + 0.010833025, + 0.029808043, + -0.036940675, + -0.040114816, + 0.034165625, + -0.014691349, + -0.059829887, + 0.016475074, + -0.018302068, + 0.00890752, + -0.018081741, + 0.015727276, + 0.017466683, + 0.011933743, + -0.028065827, + 0.0052258503, + 0.0062493044, + 0.0044333255, + -0.011237428, + -0.0069862586, + -0.033975184, + 0.023760261, + -0.015055696, + 0.0039600013, + 0.020392103, + 0.024047762, + -0.02872406, + 0.007738409, + -0.01555987, + 0.03011806, + 0.040093675, + -0.0033892216, + -0.06931259, + -0.019519035, + -0.008750149, + 0.04236017, + 0.059455607, + -0.007929568, + -0.008857907, + -0.041450884, + 0.029837137, + -0.0729099, + 0.005836722, + -0.004100339, + -0.0029754906, + 0.01634229, + -0.029647883, + -0.050842095, + -0.029163536, + 0.009248952, + -0.0028640334, + -0.052900236, + -0.05512097, + 0.055659927, + 0.04992974, + -0.004757618, + -0.036179878, + -0.07280319, + -0.03567622, + -0.044285037, + -0.008555347, + 0.04550832, + -0.00094304525, + -0.0656589, + -0.030906383, + -0.023528634, + 0.004441927, + 0.025694514, + 0.0041591898, + -0.035672203, + -0.02444802, + 0.013817473, + 
0.01189618, + 0.0062793735, + 0.0036719819, + 0.014963965, + 0.053757705, + 0.06549391, + 0.042496137, + 0.010899155, + 0.043035947, + 0.032150052, + 0.09407309, + 0.024764558, + -0.011964197, + -0.048119746, + 0.008351835, + 0.06145398, + 0.019204808, + -0.0030630424, + -0.06240826, + 0.03536538, + 0.018408166, + 0.06362795, + -0.07275413, + 0.068704925, + 0.014603027, + -0.06760976, + -0.0031986972, + 0.010279434, + 0.03215372, + 0.06905764, + -0.023212021, + -0.022716299, + -0.072324574, + 0.08606839, + 0.012951449, + 0.021978272, + 0.031508896, + -0.0057483097, + 0.09630234, + -0.0063684364, + -0.012098242, + -0.03970645, + 0.028056627, + 0.087799124, + -0.03352194, + -0.016433993, + -0.046286825, + 0.016221909, + 0.009365449, + -0.053078208, + 0.0009465837, + -0.048553433, + 0.04233797, + 0.042736158, + -0.022603348, + 0.027159866, + 0.0115378685, + -0.04380032, + 0.0344026, + 0.0620608, + -0.04509567, + -0.025683708, + 0.052748833, + 0.045589417, + -0.02661964, + -0.011906934, + -0.022709992, + -0.021741541, + 0.030429155, + 0.025474131, + -0.03997484, + -0.01695355, + 0.039500427, + 0.0066278055, + 0.017997347, + -0.010868054, + 0.034119062, + 0.0492591, + -0.025168648, + -0.03258354, + 0.017921297, + 0.002936628, + -0.016890781, + -0.01574124, + 0.0097997, + 0.0144984145, + -0.0050222855, + -0.03178876, + -0.010070219, + 0.0038994572, + 0.082671225, + -0.064686015, + -0.0023998383, + -0.0709133, + -0.012587475, + 0.004713978, + -0.008365287, + 0.04570752, + 0.019821582, + -0.045601755, + 0.005780342, + 0.023135826, + -0.03841521, + -0.014287952, + -0.040951498, + 0.001222165, + -0.0015837784, + 0.008921765, + -0.021013433, + 0.029224606, + 0.018224735, + -0.038594235, + -0.0011877345, + 0.03056137, + 0.045560293, + 0.03386976, + -0.08028984, + -0.02174568, + 0.010873439, + -0.02909561, + -0.028367657, + 0.06934649, + 0.03567452, + 0.045095395, + 0.017239548, + 0.025105212, + -0.047474947, + 0.027460333, + 0.01906143, + -0.059046946, + 0.011000827, + -0.030548505, + -0.00993384, + -0.047402643, + -0.03227493, + 0.01925817, + -0.024694432, + -0.017810628, + -0.0051988256, + -0.046833005, + 0.011399863, + -0.009450567, + -0.013994235, + -0.029993635, + 0.03204231, + 0.055144217, + 0.02970146, + 0.05029242, + 0.04417347, + 0.019293677, + 0.011820924, + 0.021562446, + 0.025712157, + 0.026714647, + 0.015479491, + -0.029627334, + 0.013564938, + 0.022211872, + 0.0008475917, + 0.02283723, + -0.0019577122, + -0.028588077, + -0.032387972, + -0.047514796, + 0.016408252, + -0.024263887, + 0.04294992, + 0.0058976035, + 0.04238604, + -0.0014817569, + -0.008880384, + -0.01518041, + 0.039314184, + -0.034863494, + -0.031348925, + 0.02491094, + 0.023272267, + -0.01213154, + -0.0029186436, + 0.009363544, + -0.020474007, + 0.022881426, + 0.011876272, + -0.099849775, + 0.04103065, + 0.036249414, + 0.018814126, + 0.011653004, + 0.01733942, + 0.038440976, + 0.031077309, + -0.023530783, + -0.060318835, + -0.01800236, + 0.040951062, + -0.015199813, + -0.048856284, + 0.007818538, + 0.0192296, + -0.046680138, + 4.1682793e-05, + -0.01107478, + 0.033890743, + -0.036434487, + 0.013583908, + -0.056057207, + 0.015355855, + -0.0056020026, + 0.027543671, + 0.006491281, + -0.062176593, + -0.0027985624, + 0.0154205365, + 0.05427184, + -0.042704068, + 0.08902915, + -0.0867114, + 0.011701053, + -0.031208558, + 0.0035119688, + 0.020856252, + 0.029149834, + -0.013294537, + 0.006884604, + -0.004071396, + -0.016199552, + 0.0140966065, + 0.034344625, + 0.044646475, + -0.014534568, + 0.06434988, + 0.057418663, + 0.054409288, 
+ -0.032788362, + 0.025831478, + 0.053699754, + 0.01104724, + -0.013593943, + 0.021206772, + -0.057033155, + 0.002879689, + -0.02299407, + -0.025942653, + -0.01795699, + -0.0005103142, + 0.009943925, + -0.0111974655, + -0.043488014, + 0.02352647, + -0.00085910445, + 0.036153458, + 0.008397858, + -0.0125623, + 0.045501575, + 0.017022615, + 0.02164789, + 0.044366788, + -0.05922759, + 0.06606177, + 0.032538608, + 0.015617672, + -0.05665216, + -0.048967004, + -0.008281686, + 0.03639404, + 0.013526518, + 0.048029386, + -0.0032675986, + -0.02734557, + 0.034290742, + -0.010661151, + -0.044663135, + -0.010002009, + -0.023236647, + -0.009099468, + -0.050651174, + -0.01877344, + -0.057528064, + -0.006980231, + 0.020679744, + 0.00032431784, + 0.004773796, + 0.0069069746, + 0.016760433, + 0.008305804, + -0.028032228, + 0.024984887, + 0.015810564, + 0.028754044, + 0.013413702, + 0.04405434, + 0.006831175, + -0.013154476, + 0.025184985, + 0.020763578, + -0.027210625, + 0.047467683, + 0.012808554, + 0.019128239, + -0.006344172, + -0.0012825177, + -0.04123715, + -0.070471205, + 0.026458906, + 0.011127495, + -0.053800732, + -0.042026933, + 0.014701638, + -0.009170802, + 0.010387788, + 0.014916444, + 0.0058068377, + 0.014975564, + 0.0056835464, + -0.049073413, + -0.022337116, + -0.021429205, + 0.011414711, + -0.059687294, + 0.026811803, + -0.033584774, + 0.03430464, + -0.061727095, + -0.002469326, + -0.025580805, + 0.042926375, + -0.022121925, + 0.0075072222, + -0.025951052, + -0.032126367, + -0.016206766, + 0.05476613, + 0.027255341, + 0.017624483, + -0.053568747, + -0.009815464, + -0.021195231, + 0.01143239, + -0.055088513, + 0.05115604, + -0.020695584, + 0.016151866, + 0.09019919, + 0.035570264, + 0.027598873, + 0.0329581, + 0.051568285, + 0.030362109, + -0.009580888, + -0.0100544235, + -0.024147386, + 0.0180904 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/8aba89449cdc.json b/tests/integration/recordings/responses/8aba89449cdc.json index 6aa6cd2c5..bb0841bbe 100644 --- a/tests/integration/recordings/responses/8aba89449cdc.json +++ b/tests/integration/recordings/responses/8aba89449cdc.json @@ -37,7 +37,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -52,7 +52,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282364, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -63,7 +63,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -78,7 +78,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282364, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -89,7 +89,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -104,7 +104,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282364, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -115,7 +115,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - 
"id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -130,7 +130,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282364, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -141,7 +141,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -156,7 +156,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282365, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -167,7 +167,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -182,7 +182,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282365, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -193,7 +193,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -208,7 +208,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282365, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -219,7 +219,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-676", + "id": "chatcmpl-79", "choices": [ { "delta": { @@ -234,7 +234,7 @@ "logprobs": null } ], - "created": 1759267544, + "created": 1759282365, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/946376830d67.json b/tests/integration/recordings/responses/946376830d67.json index 18c8b0000..52ee33bb6 100644 --- a/tests/integration/recordings/responses/946376830d67.json +++ b/tests/integration/recordings/responses/946376830d67.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.748684225Z", + "created_at": "2025-10-01T01:34:32.266493609Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.761891114Z", + "created_at": "2025-10-01T01:34:32.468394034Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.772555814Z", + "created_at": "2025-10-01T01:34:32.668683201Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.782836359Z", + "created_at": "2025-10-01T01:34:32.86812Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.792350554Z", + "created_at": "2025-10-01T01:34:33.066156104Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-30T15:57:30.801914057Z", + "created_at": "2025-10-01T01:34:33.258437386Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.811393683Z", + "created_at": "2025-10-01T01:34:33.455421239Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.820947077Z", + "created_at": "2025-10-01T01:34:33.653866336Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.830440923Z", + "created_at": "2025-10-01T01:34:33.849413071Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.840009115Z", + "created_at": "2025-10-01T01:34:34.044100975Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.850657096Z", + "created_at": "2025-10-01T01:34:34.239766712Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.860246788Z", + "created_at": "2025-10-01T01:34:34.435865862Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,15 +238,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-30T15:57:30.869711085Z", + "created_at": "2025-10-01T01:34:34.629495297Z", "done": true, "done_reason": "stop", - "total_duration": 287660073, - "load_duration": 149338464, + "total_duration": 4426089450, + "load_duration": 45156482, "prompt_eval_count": 407, - "prompt_eval_duration": 9497286, + "prompt_eval_duration": 2016388423, "eval_count": 13, - "eval_duration": 128120190, + "eval_duration": 2363948468, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/97d3812bfccb.json b/tests/integration/recordings/responses/97d3812bfccb.json index 11e0fb402..e46bd8ff8 100644 --- a/tests/integration/recordings/responses/97d3812bfccb.json +++ b/tests/integration/recordings/responses/97d3812bfccb.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:37:52.965106Z", + "created_at": "2025-09-30T17:38:28.757983551Z", "done": true, "done_reason": "stop", - "total_duration": 376594792, - "load_duration": 158273792, + "total_duration": 2983247976, + "load_duration": 54874758, "prompt_eval_count": 217, - "prompt_eval_duration": 177001375, + "prompt_eval_duration": 2733668666, "eval_count": 5, - "eval_duration": 40927500, + "eval_duration": 194120880, "response": "unsafe\nS1", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/97e259c0d3e5.json b/tests/integration/recordings/responses/97e259c0d3e5.json index 2e47bca80..7238eeaef 100644 --- a/tests/integration/recordings/responses/97e259c0d3e5.json +++ 
b/tests/integration/recordings/responses/97e259c0d3e5.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.505006Z", + "created_at": "2025-10-01T01:34:45.948323264Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.547032Z", + "created_at": "2025-10-01T01:34:46.150643413Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.588985Z", + "created_at": "2025-10-01T01:34:46.345718638Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.631139Z", + "created_at": "2025-10-01T01:34:46.536839034Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.67269Z", + "created_at": "2025-10-01T01:34:46.730927915Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.714798Z", + "created_at": "2025-10-01T01:34:46.923249037Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.756492Z", + "created_at": "2025-10-01T01:34:47.118794722Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.798115Z", + "created_at": "2025-10-01T01:34:47.311093083Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.840012Z", + "created_at": "2025-10-01T01:34:47.500911354Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.882555Z", + "created_at": "2025-10-01T01:34:47.691237236Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.924566Z", + "created_at": "2025-10-01T01:34:47.88193831Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:53.966279Z", + "created_at": "2025-10-01T01:34:48.072350123Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.008483Z", + "created_at": 
"2025-10-01T01:34:48.264819734Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.050042Z", + "created_at": "2025-10-01T01:34:48.46196594Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.092416Z", + "created_at": "2025-10-01T01:34:48.664135581Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.134857Z", + "created_at": "2025-10-01T01:34:48.860761943Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.176408Z", + "created_at": "2025-10-01T01:34:49.058887372Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.217553Z", + "created_at": "2025-10-01T01:34:49.255951122Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,15 +346,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:54.259141Z", + "created_at": "2025-10-01T01:34:49.448811175Z", "done": true, "done_reason": "stop", - "total_duration": 1008303875, - "load_duration": 119709875, + "total_duration": 7098227825, + "load_duration": 42591593, "prompt_eval_count": 384, - "prompt_eval_duration": 132645959, + "prompt_eval_duration": 3553000114, "eval_count": 19, - "eval_duration": 755215708, + "eval_duration": 3502025035, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/9b812cbcb88d.json b/tests/integration/recordings/responses/9b812cbcb88d.json deleted file mode 100644 index cedfd1c42..000000000 --- a/tests/integration/recordings/responses/9b812cbcb88d.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. 
besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:51.035807Z", - "done": true, - "done_reason": "stop", - "total_duration": 1044135792, - "load_duration": 50873709, - "prompt_eval_count": 324, - "prompt_eval_duration": 511000000, - "eval_count": 11, - "eval_duration": 481000000, - "response": "[get_weather(location=\"San Francisco, CA\")]", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/9c140a29ae09.json b/tests/integration/recordings/responses/9c140a29ae09.json index a436484d7..99b1e4cf8 100644 --- a/tests/integration/recordings/responses/9c140a29ae09.json +++ b/tests/integration/recordings/responses/9c140a29ae09.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.13567Z", + "created_at": "2025-10-01T01:34:59.108944421Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.17774Z", + "created_at": "2025-10-01T01:34:59.303969394Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.220061Z", + "created_at": "2025-10-01T01:34:59.496380344Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.261406Z", + "created_at": "2025-10-01T01:34:59.690402813Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.302615Z", + "created_at": "2025-10-01T01:34:59.886883901Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.343879Z", + "created_at": "2025-10-01T01:35:00.092344957Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", 
"__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.384951Z", + "created_at": "2025-10-01T01:35:00.294533906Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.426563Z", + "created_at": "2025-10-01T01:35:00.491944714Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.467648Z", + "created_at": "2025-10-01T01:35:00.687125699Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.509469Z", + "created_at": "2025-10-01T01:35:00.883643235Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.552302Z", + "created_at": "2025-10-01T01:35:01.078457636Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.596236Z", + "created_at": "2025-10-01T01:35:01.278324163Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,15 +238,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:37:55.637816Z", + "created_at": "2025-10-01T01:35:01.476682242Z", "done": true, "done_reason": "stop", - "total_duration": 726849208, - "load_duration": 147625750, + "total_duration": 4443849560, + "load_duration": 44492422, "prompt_eval_count": 415, - "prompt_eval_duration": 75722709, + "prompt_eval_duration": 2029440575, "eval_count": 13, - "eval_duration": 502787333, + "eval_duration": 2369292378, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/9c28ec9ac338.json b/tests/integration/recordings/responses/9c28ec9ac338.json deleted file mode 100644 index 45bfebee5..000000000 --- a/tests/integration/recordings/responses/9c28ec9ac338.json +++ /dev/null @@ -1,347 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. 
besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHow can I assist you further?<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the boiling point of polyjuice? Use tools to answer.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.434819Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.477986Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "get", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.520282Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_bo", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.561947Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "iling", - "thinking": null, 
- "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.603986Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_point", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.646447Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.688452Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "liquid", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.730147Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_name", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.772004Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "='", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.813913Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "poly", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.856Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "ju", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.897939Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "ice", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:34:23.939953Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "',", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.982033Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " c", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:24.026067Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "elsius", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:24.069083Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=True", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:24.112349Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:24.155424Z", - "done": true, - "done_reason": "stop", - "total_duration": 896931125, - "load_duration": 89697291, - "prompt_eval_count": 511, - "prompt_eval_duration": 83876750, - "eval_count": 18, - "eval_duration": 722156292, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/9d84bd0e850f.json b/tests/integration/recordings/responses/9d84bd0e850f.json new file mode 100644 index 000000000..57fd3b6b4 --- /dev/null +++ b/tests/integration/recordings/responses/9d84bd0e850f.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "What is the secret string?" 
+ ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.0032982507, + 0.024048105, + -0.12853289, + -0.09328222, + 0.04537147, + -0.013081095, + -0.022548871, + -0.012610871, + -0.03398259, + -0.03565345, + -0.12065609, + 0.05795731, + 0.030304907, + -0.050054844, + 0.044562623, + -0.007028393, + 0.029729357, + -0.06559633, + -0.003016649, + -0.059145726, + -0.0025048342, + -0.026853323, + -0.03845482, + 0.04652661, + 0.11377396, + 0.049402785, + 0.024986612, + -0.03374037, + 0.0072453716, + -0.031222388, + 0.028143488, + -0.02944117, + 0.015612549, + 0.011335137, + -0.03345625, + -0.052290704, + 0.020818414, + -0.0072931233, + -0.049004156, + 0.051721945, + -0.0289778, + 0.055966485, + -0.008853474, + -0.0033013513, + 0.042488985, + -0.02503629, + -0.023478491, + 6.361688e-05, + 0.029803744, + -0.0853184, + 0.058609914, + -0.024255395, + 0.053932793, + -0.019457405, + 0.051705584, + 0.01818444, + 0.0011400589, + -0.030472878, + 0.030476563, + 0.04045823, + 0.06775606, + 0.028657041, + -0.026482275, + 0.034275167, + 0.057681337, + -0.029520353, + -0.02563013, + 0.04497156, + 0.011341844, + -0.01990484, + 0.062490467, + 0.0149883, + 0.012965385, + -0.03740664, + -0.066844806, + -0.0049723284, + 0.013713347, + -0.017963262, + -0.018934384, + 0.027482966, + 0.040457863, + -0.013168924, + -0.0035037915, + 0.008605596, + -0.0050318716, + -0.035094846, + -0.023209162, + 0.012752807, + -0.0040029115, + 0.054372996, + -0.0016313397, + 0.010949289, + 0.037629694, + 0.03467603, + -0.01404976, + 0.016396504, + 0.009641418, + 0.037466723, + -0.049439345, + -0.03486651, + 0.00909679, + -0.032654777, + 0.028879896, + 0.010429663, + 0.0076558427, + 0.029257128, + -0.012736472, + -0.008938538, + -0.039327268, + 0.00024551645, + -0.0125722345, + 0.05394095, + -0.041321404, + -0.03592415, + 0.024531987, + -0.029710697, + 0.020478822, + -0.04660627, + -0.0313377, + -0.018237257, + -0.05293816, + -0.01908866, + 0.014138931, + 0.044201765, + -0.016025335, + 0.04669023, + -0.017082678, + 0.03196799, + 0.015393837, + -0.07515081, + -0.032932557, + 0.004582849, + -0.039644938, + 0.014318785, + 0.027004478, + 0.041546088, + -0.020133901, + 0.007899893, + 0.041371964, + 0.012456413, + 0.004301203, + 0.023503434, + -0.031698585, + -0.036926363, + 0.033228748, + -0.079850696, + 0.013027165, + -0.0041246368, + -0.061089512, + -0.03559738, + 0.01957783, + 0.006304584, + 0.022936152, + -0.00869367, + -0.016258465, + -0.03193504, + 0.07083036, + 1.3158466e-05, + -0.000789161, + 0.059398863, + 0.024287345, + 0.032700937, + 0.00014210193, + 0.03839921, + -0.068401694, + -0.042496935, + 0.033600904, + 0.07475036, + 0.030072743, + 0.042306513, + -0.04167343, + 0.014361867, + 0.003916772, + 0.012658739, + -0.0208498, + -0.006698081, + 0.0020109043, + -0.038274035, + 0.012730541, + -0.028303085, + 0.002623988, + -0.03940956, + 0.04325401, + 0.022744924, + -0.04673316, + -0.012081508, + -0.0012117454, + -0.05294897, + -0.012454307, + -0.05645314, + -0.042802032, + -0.018745977, + -0.078520805, + -0.006411952, + 0.0028680202, + -0.015461434, + -0.023440903, + 0.0034964534, + 0.021797534, + 0.0086095035, + -0.06603934, + 0.026726916, + -0.0175542, + -0.017027961, + 0.010762627, + 0.01514871, + 0.039492007, + -0.007983469, + 0.03619062, + 0.0168234, + 0.07535989, + -0.025904786, + -0.017366076, + 
-0.01347189, + 0.0018522989, + -0.022092728, + 0.012061661, + 0.012215762, + -0.021970322, + 0.016265877, + 0.059915975, + -0.009835821, + 0.042733837, + -0.018232534, + -0.039544348, + 0.048661057, + -0.04855545, + -0.0098408945, + -0.058503207, + 0.0077513047, + -0.0077372594, + -0.117901914, + 0.028783537, + 0.06965414, + -0.019801978, + -0.010675623, + 0.0051592723, + 0.027830902, + 0.0086547155, + 0.02346684, + 0.010180381, + 0.010100905, + 0.012445904, + 0.02678591, + -0.019694107, + 0.06288537, + -0.031153811, + -0.025075698, + 0.023629734, + 0.043685034, + -0.020924108, + 0.012402358, + -0.018577745, + 0.021082113, + 0.028547145, + -0.037001748, + -0.011313099, + -0.01756746, + 0.00010444474, + -0.055237714, + 0.0032047168, + -0.01408867, + 0.043286763, + -0.0110951485, + 0.0040360685, + -0.01238232, + 0.008533453, + 0.004865151, + 0.019677898, + -0.013659801, + -0.013150981, + 0.04567707, + -0.023701515, + -0.02194, + -0.02315702, + 0.008358462, + 0.020533461, + -0.019584313, + 0.0068455758, + 0.011320068, + -0.05442082, + 0.020411376, + -0.037794303, + 0.013764559, + -0.04595593, + 0.022671962, + 0.0015506811, + -0.04903287, + -0.0034638422, + 0.010126593, + 0.0398443, + 0.014924688, + -0.00285095, + 0.026505185, + 0.033000916, + 0.027125781, + 0.03644317, + 0.016125385, + 0.013681576, + -0.039973572, + 0.008721206, + 0.0072165024, + -0.00014323213, + 0.027076578, + -0.03140859, + -0.02935517, + 0.019970547, + -0.006123944, + 0.0261947, + 0.004149205, + -0.04233941, + 0.01762215, + 0.060215384, + 0.04274169, + -0.041242544, + 0.07079954, + -0.02192986, + 0.0066491943, + 0.061972313, + -0.00027346352, + -0.028163994, + -0.051354542, + 0.011054066, + -0.068790704, + -0.02264598, + 0.006427555, + -0.010099159, + 0.03748625, + -0.054964446, + -0.047367398, + 0.01665378, + 0.026939042, + -0.052629273, + -0.013164712, + -0.0185081, + 0.049786516, + -0.023693098, + -0.014896749, + -0.043053966, + -0.011251035, + 0.02001209, + -0.005552487, + 0.024903947, + -0.035587218, + 0.029973872, + 0.01619007, + -0.028468877, + -0.04486142, + 0.07410715, + 0.04597798, + -0.058169637, + 0.028120043, + -0.040351056, + 0.034274198, + 0.0005454698, + 0.033752613, + 0.028961617, + 0.00026255855, + 0.049489483, + 0.009841828, + 0.043682307, + -0.04498248, + 0.016212659, + -0.037912693, + 0.037102655, + 0.0024109408, + 0.015737364, + -0.022307407, + -0.0025394107, + 0.037405036, + -0.054835204, + 0.0320709, + 0.0067557557, + -0.0075890548, + -0.01591746, + -0.011909059, + -0.11405957, + -0.035998806, + -0.019466246, + 0.039460458, + 0.027758196, + -0.05538542, + -0.0080383, + -0.0036382494, + 0.020207345, + -0.009298509, + -0.036259625, + -0.011394148, + 0.050165977, + 0.0017537237, + -0.025921056, + -0.030647554, + -0.058813423, + -0.006920564, + -0.004205008, + -0.013795641, + 0.011260714, + 0.035107456, + 0.004822095, + -0.040850554, + -0.048511803, + -0.035496302, + 0.0063335723, + -0.013322335, + -0.023558998, + 0.07930992, + -0.012620598, + -0.034293715, + 0.08328258, + -0.019366555, + 0.03698619, + 0.047513835, + 0.008357678, + -0.066831276, + -0.02082262, + -0.0015991073, + 0.003765559, + -0.029072076, + -0.03816226, + -0.011767357, + 0.07332908, + 0.04895749, + 0.006689078, + 0.00029748515, + -0.026718164, + 0.00036674147, + -0.0017685532, + 0.034337346, + -0.03850612, + -0.08448081, + 0.023124069, + 0.031469442, + 0.05461369, + 0.0150575545, + -0.011481356, + 0.021065626, + -0.015059441, + -0.03412943, + -0.03363207, + 0.07253375, + 0.020403067, + 0.021076659, + 0.013130626, + 0.02942604, + 
0.025791297, + 0.07377326, + 0.05306959, + 0.0010705212, + -0.05967892, + 0.07230877, + -0.04268709, + -0.043011066, + 0.0023348934, + 0.017243292, + 0.083405286, + -0.017652802, + -0.022455063, + 0.006875074, + 0.05107323, + -0.004959619, + -0.009972133, + -0.0076400945, + -0.027601436, + 0.023383798, + 0.03201444, + -0.014467706, + 0.0222043, + -0.029323487, + 0.09220868, + 0.11730722, + -0.019923192, + 0.025141044, + 0.04414654, + -0.023898387, + 0.024932057, + -0.0022838234, + -0.02317694, + 0.046928406, + -0.015200478, + 0.043392334, + -0.009497074, + 0.050595526, + -0.052608166, + -0.06341073, + 0.01764765, + 0.050764337, + 0.009962085, + -0.014817001, + -0.043528218, + 0.011283477, + 0.03162563, + 0.006628474, + 0.04251924, + -0.009266219, + 0.000588541, + -0.07837013, + -0.0035156938, + -0.028765965, + -0.00510325, + -0.0124228755, + 0.029888988, + 0.019898314, + -0.010900937, + 0.040689927, + 0.024022892, + -0.0040173554, + 0.03332095, + -0.04180631, + -0.080019884, + -0.028443588, + -0.047766674, + 0.0033815126, + -0.024960354, + -0.024660213, + 0.070443876, + -0.0024894238, + 0.09180418, + 0.018026538, + 0.036161616, + 0.00799906, + -0.006396599, + 0.039654985, + 0.008694138, + -0.008564176, + -0.07807781, + 0.033734564, + -0.0013041289, + -0.011019946, + 0.013449641, + -0.040933467, + -0.02253431, + 0.005898656, + -5.7860056e-05, + -0.027337592, + 0.030869937, + -0.038230628, + -0.027078092, + 0.0368399, + -0.03543492, + 0.039026134, + 0.0112541355, + 0.016505718, + -0.009606484, + 0.0004166137, + 0.019906865, + -0.017261252, + -0.029536013, + -0.002165905, + -0.0012417852, + -0.024301674, + 0.030746931, + -0.020348042, + -0.038710874, + 0.00048686584, + -0.016712623, + -0.045763664, + -0.0036347655, + -0.003329149, + 0.0019252732, + 0.019242223, + 0.033618063, + 0.002100299, + 0.009325876, + 0.0025050559, + -0.0024080786, + -0.015726727, + 0.008574558, + -0.02200334, + 0.04011618, + 0.04645626, + -0.039199144, + 0.012834688, + -0.04762284, + 0.030188235, + -0.020982744, + -0.00890629, + -0.02327833, + -0.058146186, + -0.050042126, + -0.042070866, + 0.009775578, + -0.042891078, + 0.02366119, + -0.021638528, + -0.008520272, + 0.043798972, + -0.028892903, + -0.07899356, + 0.0025773922, + -0.03532012, + -0.05134102, + 0.02882059, + 0.011530511, + 0.054503333, + -0.015186478, + 0.0053656455, + -0.040727176, + -0.010181232, + 0.014485777, + 0.010053276, + 0.03588428, + 0.050228212, + 0.040914807, + -0.021811074, + -0.009043635, + 0.04546432, + 0.05599287, + 0.05093548, + 0.00575169, + -0.009603692, + 0.08623272, + -0.005562126, + -0.035713222, + -0.0037661153, + 0.0482513, + -0.025935618, + 0.022839705, + 0.029907469, + -0.051781233, + -0.060429472, + 0.043899428, + -0.04184034, + -0.0081241, + -0.026821263, + 0.08344081, + -0.026048664, + -0.045267113, + -0.027881708, + -0.012180103, + 0.045505904, + -0.07117413, + 0.05662321, + -0.026671642, + -0.024000023, + -0.031813554, + 0.05153235, + -0.028020483, + 0.07026464, + -0.025191095, + 0.07143681, + 0.051605754, + -0.009703007, + -0.029227225, + -0.00065767125, + -0.0075300005, + 0.07697022, + 0.041171554, + 0.022690801, + 0.023518566, + -0.0118862875, + -0.0019155933, + 0.047873914, + -0.027927285, + 0.02106777, + 0.07642541, + -0.065543994, + 0.01864564, + -0.067919835, + -0.050306533, + -0.052590683, + 0.011256092, + -0.000894737, + -0.005858903, + -0.04342036, + 0.04395577, + -0.009446447, + 0.052444723, + -0.030406285, + -0.02533691, + 0.011770685, + 0.026355814, + 0.0064105205, + 0.07591828, + -0.01750948, + 0.060417976, + 
0.0132931825, + 0.040372994, + 0.0331364, + -0.068492234, + -0.043099575, + 0.00020726812, + 0.015288213, + -0.0217876, + -0.008847198, + 0.008991637, + -0.022200268, + -0.026020769, + -0.060431115, + -0.036312483, + -0.06356333, + -0.019940577, + -0.06611774, + -0.016805809, + -0.046658624, + 0.056505382, + 0.036633372, + -0.06401027, + 0.025166163, + -0.046789452, + 0.07699744, + -0.007920236, + 0.047786005, + 0.023061091, + 0.039938573, + -0.040108122, + -0.015772898, + 0.00716303, + -0.009237628, + -0.034444094, + 0.028462611, + -0.01609163, + 0.015767207, + -0.018959865, + 0.045077763, + -0.021746196, + 0.049683467, + 0.018513858, + -0.036215466, + -0.018966345, + -0.028596113, + 0.040023156, + 0.008453986, + -0.020839535, + 0.0090973275, + -0.013051281, + -0.03853055, + 0.048016917, + -0.00038126565, + 0.050981052, + -0.012403114, + 0.009137451, + -0.009048387, + 0.021072997, + -0.018361593, + 0.029914865, + 0.03225918, + -0.023554014, + 0.008001624, + -0.023180075, + 0.011162308, + 0.041094445, + 0.0005753008, + -0.0039947922, + 0.003565787, + -0.0031719306, + -0.009397488, + -0.060294356, + 0.046168815, + -0.011650087, + -0.0081371255, + 0.030847827, + -0.05003843, + -0.051973872, + 0.073908724, + 0.05296223, + 0.0010943229, + 0.031026546, + 0.03573846, + 0.08544318, + 0.010603667, + 0.021817919, + -0.025213707, + -0.018352825, + 0.046616767, + -0.024417114, + -0.059228994, + 0.014890397, + -0.0010511203 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/9fadf5a3d68f.json b/tests/integration/recordings/responses/9fadf5a3d68f.json index aba45bcd3..2ba404b70 100644 --- a/tests/integration/recordings/responses/9fadf5a3d68f.json +++ b/tests/integration/recordings/responses/9fadf5a3d68f.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:38:03.270261Z", + "created_at": "2025-09-30T17:40:05.569054257Z", "done": true, "done_reason": "stop", - "total_duration": 244051875, - "load_duration": 111239500, + "total_duration": 2957218530, + "load_duration": 54048822, "prompt_eval_count": 224, - "prompt_eval_duration": 120962791, + "prompt_eval_duration": 2853937923, "eval_count": 2, - "eval_duration": 11306292, + "eval_duration": 48703790, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/a4ef4fd267a0.json b/tests/integration/recordings/responses/a4ef4fd267a0.json new file mode 100644 index 000000000..02e1fd72c --- /dev/null +++ b/tests/integration/recordings/responses/a4ef4fd267a0.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "This is a test file 1" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.026792325, + 0.03093699, + -0.15664786, + -0.031769898, + 0.048670463, + -0.0033944864, + 0.04933814, + 0.012026393, + -0.063936, + -0.042519215, + 0.0006952768, + 0.045919683, + -0.008758177, + 0.01672516, + -0.06760369, + -0.04147062, + 0.062523685, + 
-0.064990245, + -0.006743896, + -0.05164598, + 0.0026207995, + -0.026605248, + -0.08703309, + -0.020834887, + 0.1326039, + 0.022190811, + -0.06336449, + 0.041573867, + -0.09539482, + -0.016348843, + 0.040155534, + -0.03646593, + 0.017186256, + -0.035168163, + -0.010381799, + -0.027018616, + 0.03469282, + 0.02928655, + 0.05159615, + 0.021040829, + -0.030119466, + -0.008437525, + 0.005015108, + -0.008472868, + 0.03012562, + 0.011633383, + 0.0030256396, + 0.044329047, + 0.009031695, + 0.0035846739, + 0.011534351, + 0.016298097, + -0.021354701, + 0.027153566, + 0.033898223, + -0.0024417024, + 0.0056214235, + 0.005837161, + 0.00562505, + -0.060362887, + 0.028006515, + 0.025593396, + -0.081357956, + 0.03580927, + -0.0067716073, + -0.046097863, + -0.028055403, + 0.0036626458, + -0.01241678, + 0.00208724, + 0.08872791, + -0.009103828, + 0.037730407, + -0.019509701, + 0.012843728, + -0.04402494, + 0.016731374, + -0.05801879, + -0.05453479, + -0.01068673, + 0.06356347, + 0.04127069, + 0.0067519997, + 0.03927803, + 0.09383723, + -0.028977362, + -0.0297527, + -0.014329299, + 0.006879821, + 0.03446831, + 0.016232423, + 0.032534376, + 0.02363687, + -0.011648355, + -0.01195166, + 0.003325076, + -0.007844654, + 0.041290022, + -0.004359298, + 0.0022596763, + 0.037966512, + 0.015887316, + 0.018222453, + -0.027174357, + 0.02473576, + 0.012280125, + -0.013674789, + 0.008666073, + -0.06826804, + -0.021038985, + 0.0016152107, + 0.02413647, + -0.018368484, + -0.025226548, + 0.013705246, + -0.018989984, + 0.0683322, + -0.025142781, + -0.027675495, + 0.0023693573, + -0.010056788, + -0.01769984, + 0.026491402, + 0.069633484, + 0.024076829, + 0.044652022, + -0.062568866, + 0.031585287, + 0.0054407343, + -0.038442608, + -0.011100477, + 0.018971642, + 0.01565612, + -0.03252838, + 0.0063219094, + 0.022529257, + 0.008277373, + 0.011207819, + -0.058460347, + -0.017124427, + -0.029950188, + -0.011155674, + 0.026960243, + 0.017531564, + 0.045436632, + -0.021886634, + 0.028391592, + 0.022554222, + -0.019893171, + 0.0041664722, + 0.053086217, + 0.0054540504, + 0.015131434, + 0.01327971, + 0.013327672, + -0.067845084, + 0.018720692, + -0.0025512152, + 0.023763299, + 0.05842385, + 0.00019893165, + -0.021977939, + -0.030850312, + 0.028413272, + -0.047995366, + -0.04297481, + -0.0011310787, + 0.08633486, + 0.07842147, + -0.0439257, + -0.023544447, + -0.057144523, + -0.02520807, + -0.015982438, + -0.05408948, + -0.031477932, + 0.008370782, + -0.02216448, + 0.02113249, + -0.022829711, + 0.036768507, + -0.010499057, + 0.0033416639, + 0.026612421, + -0.0040408946, + -0.037447333, + -0.002586024, + -0.02990973, + -0.062172376, + -0.0029027562, + -0.0032355392, + -0.01683112, + -0.08550601, + -0.06503881, + 0.019303314, + -0.048659757, + 0.009732844, + -0.03025688, + 0.028209025, + -0.006922874, + -0.0024255237, + -0.011451635, + -0.044170108, + 0.019439884, + -0.028493812, + -0.021424118, + -0.012596394, + -0.026894623, + -0.016631894, + 0.006937038, + 0.038847376, + -0.019490546, + -0.035997394, + 0.0343228, + 0.046157695, + -0.03467906, + -0.011670025, + -0.02360443, + -0.03209323, + -0.023816131, + 0.011261538, + 0.004140802, + 0.05378309, + -0.034095783, + 0.0032736673, + -0.023968946, + -0.057925865, + -0.038374748, + -0.023432449, + -0.031378884, + -0.018283365, + -0.044473544, + 0.023770774, + 0.012151021, + -0.00989798, + -0.016579827, + -0.03912221, + 0.061459407, + -0.02270193, + 0.046470493, + -0.03565845, + 0.038344137, + -0.00060047704, + -0.010866198, + -0.010595391, + 0.0040242574, + -0.011870223, + -0.030662687, + 
0.053333513, + 0.016585337, + -0.034385324, + 0.019072872, + 0.02482893, + 0.060127478, + 0.022492146, + -0.02539478, + -0.007217331, + -0.026689157, + 0.0328626, + -0.045700822, + 0.015094248, + -0.048051264, + 0.033289358, + -0.015658941, + -0.047716986, + -0.009127074, + -0.029856639, + 0.031833287, + -0.041548215, + -0.036257725, + -0.031805903, + 0.017809667, + -0.006915335, + -0.019608539, + 0.021878801, + -0.03172998, + 0.007869648, + 0.025838438, + -0.00058663427, + 0.03564143, + -0.018670827, + 0.009602577, + -0.009344786, + 0.016194435, + 0.037599266, + 0.00694385, + 0.048156716, + -0.0063888165, + 0.02603451, + 0.029694544, + -0.001316076, + 0.04268831, + -0.0067985193, + 0.022871338, + 0.014592814, + 0.00715007, + 0.043508768, + -0.01459811, + 0.020012084, + 0.01285804, + -0.020089578, + 0.022833034, + 0.031225007, + 0.04425304, + 0.025835698, + -0.03154635, + 0.037163053, + -0.032706518, + 0.01870285, + 0.033385955, + -0.07165778, + 0.008837176, + -0.03407519, + 0.011077847, + -0.032700922, + 0.04877876, + 0.0436143, + 0.013553518, + 0.071895495, + -0.030767605, + -0.0058505647, + -0.079715356, + -0.035949104, + 0.0126587115, + 0.022821989, + 0.023578636, + 0.0064976574, + 0.050335396, + -0.027013855, + -0.05704946, + 0.06652898, + 0.075718984, + -0.06392454, + -0.03972515, + 0.033892315, + 0.029048424, + 0.034230053, + 0.048473887, + 0.004268155, + 0.050873943, + 0.017966365, + 0.031012183, + 0.035040673, + 0.0069641634, + 0.03588263, + -0.054883715, + -0.015174634, + 0.031095453, + -0.0034547914, + 0.07055899, + 0.006959644, + 0.0054922295, + 0.022231862, + 0.0027122695, + 0.009299621, + 0.022458393, + 0.04126543, + -0.021928346, + 0.039010584, + -0.0193515, + 0.03772616, + -0.01625833, + -0.016094128, + -0.009658867, + 0.018461023, + 0.011062551, + -0.034120347, + 0.016894026, + 0.073283896, + 0.022197865, + -0.017135348, + 0.0017097074, + 0.05956092, + 0.063407786, + 0.042028006, + 0.042882785, + -0.07191631, + -0.009047546, + 0.0035314842, + 0.040281277, + 0.0517425, + -0.027128628, + 0.027991537, + 0.03381131, + 0.005920727, + -0.011691999, + 0.0267714, + -0.010963327, + 0.056068476, + -0.0005457899, + -0.01650052, + 0.017984223, + -0.08018128, + 0.04320543, + 0.011011166, + 0.004089064, + 0.01760083, + -0.006808394, + -0.051000126, + -0.008992308, + -0.013578323, + -0.012156638, + -0.0067469757, + 0.0150457695, + -0.02010428, + -0.010990015, + -0.029041639, + -0.04632667, + 0.020392314, + 0.0072885626, + 0.027568653, + -0.024584606, + -0.018145312, + -0.060855325, + 0.0025272707, + 0.02513976, + 0.037904035, + 9.171318e-05, + 0.014477873, + -0.012227636, + 0.0050520534, + 0.045649383, + 0.013770142, + -0.020129545, + -0.036889248, + -0.007372258, + 0.056743897, + 0.068659395, + -0.016984485, + -0.09025703, + -0.020056212, + 0.013750284, + 0.028645078, + -0.007090899, + -0.026898425, + 0.074853, + 0.0004840898, + -0.009810746, + -0.033916537, + 0.027401606, + 0.041416552, + -0.05452964, + -0.04670048, + -0.01061277, + 0.015118332, + 0.11969722, + 0.08716515, + -0.043436825, + -0.045450028, + -0.011495474, + -0.0053251395, + 0.018191162, + -0.023512367, + 0.02439878, + 0.07168296, + -0.029718433, + 0.05978129, + -0.018310038, + 0.00019201823, + 0.0588457, + -0.004629452, + 0.011157221, + 0.07020875, + 0.029090729, + 0.011827569, + -0.016118564, + 0.030296495, + -0.04006995, + 0.005592458, + 0.059310023, + -0.0139375925, + -0.056882996, + -0.0043539144, + -0.04476427, + 0.008733033, + 0.0181087, + -0.033747524, + 0.023971833, + -0.04448808, + 0.01909963, + 0.03931093, + 
0.004226108, + -0.05194325, + -0.039234832, + 0.022266004, + -0.0063400185, + 0.029090801, + 0.014526388, + 0.027634978, + 0.020610472, + 0.027755301, + 0.019532172, + 0.07653513, + 0.038188096, + 0.013058072, + -0.021564314, + -0.004024598, + -0.032580923, + -0.008680397, + -0.0010052286, + 0.019816427, + -0.0051071616, + -0.004137778, + -0.0146190785, + -0.017425163, + -0.018814942, + 0.009330389, + -0.034730554, + -0.09950049, + -0.011828971, + -0.048524242, + -0.015290795, + 0.003975381, + 0.034570675, + 0.086534545, + 0.0023209865, + 0.024228156, + 0.001791505, + -0.030159235, + 0.029798415, + 0.029238526, + 0.003280956, + 0.03067396, + -0.017041316, + -0.10483067, + 0.045287162, + -0.0044179363, + -0.029821943, + 0.085055605, + 0.06824925, + 0.016470019, + 0.012064929, + -0.012787015, + -0.0062754382, + -0.008308865, + -0.0017331241, + -0.05941388, + -0.0042225947, + 0.005673389, + 0.06117662, + -0.06577193, + -0.017765824, + 0.012709231, + -0.046415754, + 0.00533243, + -0.030084299, + -0.068151176, + 0.041388392, + -0.008748364, + -0.06503942, + 0.04298269, + -0.0395347, + -0.060710963, + -0.023440724, + 0.026063284, + -0.03867607, + 0.0051523917, + -0.04764507, + -0.02051396, + -0.03816295, + 0.01834131, + 0.003109336, + 0.00040601534, + -0.000574874, + 0.023330892, + -0.03975682, + -0.011863705, + -0.0008176911, + 0.0012484301, + 0.02382547, + 0.011094778, + -0.029535167, + 0.002527838, + -0.030506654, + -0.031074118, + 0.032151125, + 0.016547065, + 0.053861786, + -0.045584653, + -0.0364264, + 0.042833533, + -0.0032813142, + 0.010841442, + 0.029280445, + -0.0074102865, + 0.0031719606, + 0.0066031497, + -0.015888812, + 0.03645216, + -0.035819612, + -0.035440333, + -0.0300292, + 0.008848944, + 0.008425931, + -0.020204162, + 0.0029528947, + 0.005234882, + -0.025068615, + -0.017057832, + -0.041331146, + 0.00070108456, + 0.014641318, + -0.0060291695, + -0.04652187, + -0.029138539, + 0.0040340438, + 0.045350928, + 0.015156647, + -0.0013569613, + 0.0013388247, + 0.06328819, + 0.008267542, + -0.0843244, + 0.007819933, + -0.015028652, + -0.036059376, + 0.053294875, + -0.028327828, + 0.019679923, + -0.040117774, + 0.020920893, + -0.043621734, + 0.06002377, + -0.029151496, + -0.0045994134, + -0.009784679, + -0.03870092, + 0.010416321, + 0.059916586, + 0.07692586, + -0.06094488, + 0.030034011, + -0.054865606, + -0.053873308, + -0.062464256, + 0.005752507, + -0.046865426, + 0.018496031, + 0.050554793, + 0.07667609, + 0.04521703, + 0.021193774, + -0.010788837, + -0.049785435, + 0.009305702, + 0.036620248, + 0.007600405, + 0.05725011, + 0.030702267, + -0.0476178, + 0.068317704, + 0.06863345, + 0.035322998, + -0.02223456, + -0.003943451, + 0.00566325, + 0.043405402, + -0.049774975, + -0.059950616, + -0.060994945, + -0.00272665, + 0.02056273, + -0.05611676, + 0.008522081, + 0.008111256, + 0.022916265, + -0.0012039327, + -0.02415934, + 0.006603039, + -0.07728265, + 0.023383535, + 0.010126175, + 0.066026114, + 0.019516824, + -0.02743895, + 0.031764206, + 0.042299137, + 0.06816786, + 0.0013242968, + -0.037178222, + -0.06037109, + -0.038619135, + 0.058209002, + 0.032519363, + 0.040420506, + -0.081026524, + -0.007876469, + -0.058994833, + -0.021188803, + 0.0087137325, + -0.0060559064, + -0.018234588, + -0.016353764, + -0.041321892, + -0.009873551, + -0.0014623556, + 0.0708463, + 0.003149389, + -0.017390637, + 0.043613207, + 0.008190076, + 0.031949073, + 0.0059449924, + 0.04650619, + -0.03871478, + -0.02993407, + 0.006429338, + 0.00781245, + -0.0533047, + -0.04324872, + 0.030584995, + 0.027463216, + 
0.00546872, + 0.07692511, + -0.028224103, + 0.008554065, + -0.014472004, + 0.011852825, + -0.0035424957, + 0.009787675, + 0.09010725, + 0.044465154, + -0.033444583, + 0.011267346, + -0.0009460784, + -0.042941727, + 0.0075897933, + -0.0339105, + 0.056183178, + -0.057945125, + -0.04466646, + -0.03827882, + -0.030259024, + 0.023189662, + -0.018669333, + 0.0075938306, + 0.0009940926, + -0.036094803, + 0.00955545, + 0.032975323, + 0.0029834385, + 0.05080568, + -0.017404221, + -0.016065422, + -0.048709493, + 0.0115149645, + -0.028778277, + 0.027973842, + -0.004772469, + -0.005541551, + 0.028508712, + -0.053011157, + 0.011259917, + 0.032425366, + -0.004184233, + -0.018505724, + -0.03317818, + -0.0035943638, + 0.082571395, + -0.06401087, + 0.002303715, + -0.032291833, + 0.028782103, + 0.00977568, + -0.012253565, + -0.050462194, + 0.008639128, + -0.053021718 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/a59d0d7c1485.json b/tests/integration/recordings/responses/a59d0d7c1485.json index c951596ce..3011a4ffa 100644 --- a/tests/integration/recordings/responses/a59d0d7c1485.json +++ b/tests/integration/recordings/responses/a59d0d7c1485.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:38:04.367295Z", + "created_at": "2025-09-30T17:40:13.28032796Z", "done": true, "done_reason": "stop", - "total_duration": 276503250, - "load_duration": 125852000, + "total_duration": 3178842015, + "load_duration": 44428132, "prompt_eval_count": 238, - "prompt_eval_duration": 138575125, + "prompt_eval_duration": 3081272287, "eval_count": 2, - "eval_duration": 11277208, + "eval_duration": 52562543, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/a6810c23eda8.json b/tests/integration/recordings/responses/a6810c23eda8.json deleted file mode 100644 index d5b5c5a6d..000000000 --- a/tests/integration/recordings/responses/a6810c23eda8.json +++ /dev/null @@ -1,799 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "prompt": "<|begin_of_text|>Complete the sentence using one word: Roses are red, violets are ", - "raw": true, - "options": { - "temperature": 0.0, - "max_tokens": 50, - "num_predict": 50 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:13.985194Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ______", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.027686Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_", - "thinking": null, - "context": null - } - 
}, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.068694Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.10959Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.150266Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " best", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.190959Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " answer", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.231689Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.272328Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " blue", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.312774Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.353348Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.393886Z", - "done": 
false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " traditional", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.434753Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " nursery", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.474992Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " rhyme", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.515133Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " goes", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.555579Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " like", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.596355Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " this", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.637241Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ":\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.679196Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "R", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.719878Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - 
"eval_count": null, - "eval_duration": null, - "response": "oses", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.759719Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " are", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.79997Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " red", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.84053Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.881964Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "V", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.921986Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "io", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:14.962551Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "lets", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.003226Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " are", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.043676Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " blue", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - 
"__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.083952Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.124797Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Sugar", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.165202Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.205416Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " sweet", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.245854Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.286352Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "And", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.326952Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " so", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.367575Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " are", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.408069Z", - "done": false, - "done_reason": null, - "total_duration": null, - 
"load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.448413Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "!", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.489223Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.530477Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Or", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.571317Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " something", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.612263Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " similar", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.652533Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".)", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:15.692748Z", - "done": true, - "done_reason": "stop", - "total_duration": 1808812333, - "load_duration": 57887042, - "prompt_eval_count": 18, - "prompt_eval_duration": 42042750, - "eval_count": 43, - "eval_duration": 1708293042, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/ae1c22f18ecc.json b/tests/integration/recordings/responses/ae1c22f18ecc.json deleted file mode 100644 index c9a47657b..000000000 --- a/tests/integration/recordings/responses/ae1c22f18ecc.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": 
"http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest trace 0<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:41:47.144448Z", - "done": true, - "done_reason": "stop", - "total_duration": 2462760250, - "load_duration": 83668541, - "prompt_eval_count": 20, - "prompt_eval_duration": 74227125, - "eval_count": 58, - "eval_duration": 2304346166, - "response": "I'm happy to help you with your test, but I don't see what kind of test we are testing. Could you please provide more context or clarify what kind of test you would like me to perform? Is it a programming test, a language proficiency test, or something else?", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/ae6835cfe70e.json b/tests/integration/recordings/responses/ae6835cfe70e.json deleted file mode 100644 index 9766c6023..000000000 --- a/tests/integration/recordings/responses/ae6835cfe70e.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. 
besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_object_namespace_list\",\n \"description\": \"Get the list of objects in a namespace\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"kind\", \"namespace\"],\n \"properties\": {\n \"kind\": {\n \"type\": \"string\",\n \"description\": \"the type of object\"\n },\n \"namespace\": {\n \"type\": \"string\",\n \"description\": \"the name of the namespace\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat pods are in the namespace openshift-lightspeed?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[get_object_namespace_list(kind=\"pod\", namespace=\"openshift-lightspeed\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nthe objects are pod1, pod2, pod3<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:18.871277Z", - "done": true, - "done_reason": "stop", - "total_duration": 644170416, - "load_duration": 69749500, - "prompt_eval_count": 386, - "prompt_eval_duration": 531218583, - "eval_count": 2, - "eval_duration": 42446084, - "response": "[]", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/b14ff438ca99.json b/tests/integration/recordings/responses/b14ff438ca99.json deleted file mode 100644 index 180ec3286..000000000 --- a/tests/integration/recordings/responses/b14ff438ca99.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the currency of Japan?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:39:59.708499Z", - "done": true, - "done_reason": "stop", - "total_duration": 5293681583, - "load_duration": 196095541, - "prompt_eval_count": 23, - "prompt_eval_duration": 72668042, - "eval_count": 124, - "eval_duration": 5024327166, - "response": "The official currency of Japan is the Japanese yen (\u00a5). It is abbreviated as \"JPY\" and its symbol is \u00a5. The yen is divided into 100 sen, although the sen has been officially discontinued since 1967.\n\nYou can exchange your money for yen at banks, currency exchange offices, or use ATMs to withdraw cash from an ATM. 
Credit cards are also widely accepted in Japan, especially among major retailers and restaurants.\n\nIt's worth noting that some businesses may not accept foreign currencies other than US dollars, so it's a good idea to have some local currency on hand when traveling to Japan.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/b28f75bd87dc.json b/tests/integration/recordings/responses/b28f75bd87dc.json index 4a874e119..d37fbede8 100644 --- a/tests/integration/recordings/responses/b28f75bd87dc.json +++ b/tests/integration/recordings/responses/b28f75bd87dc.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-316", + "id": "chatcmpl-489", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759247858, + "created": 1759282539, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/b37b79e8ef96.json b/tests/integration/recordings/responses/b37b79e8ef96.json new file mode 100644 index 000000000..62446b0a3 --- /dev/null +++ b/tests/integration/recordings/responses/b37b79e8ef96.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "This is a test file 2" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.051801182, + 0.0010255196, + -0.15081488, + -0.017234368, + 0.03322784, + -0.012282827, + 0.03583359, + -0.016244456, + -0.074344784, + -0.06549673, + -0.0063170893, + 0.06420392, + -0.00028500104, + -0.026120752, + -0.026853874, + -0.033764943, + 0.08796864, + -0.046479028, + -0.0025558919, + -0.038775135, + -0.0014058551, + -0.028691545, + -0.05656057, + -0.018200194, + 0.12270096, + 0.041239902, + -0.02222655, + 0.0531555, + -0.09066884, + -0.013796611, + 0.044840023, + -0.021647913, + 0.025695423, + -0.06534594, + -0.024780698, + -0.03968167, + 0.040749285, + 0.023914833, + 0.023482118, + 0.026546348, + -0.02443028, + -0.009490436, + -0.008743914, + -0.012776919, + 0.0009962226, + -0.015167954, + -0.0038977817, + 0.06930047, + -0.022295639, + -0.035409007, + 0.014115908, + 0.016303558, + -0.0033719216, + 0.03682686, + 0.037707012, + -0.022630926, + -0.017144458, + -0.0066924277, + 0.018952414, + -0.058043465, + 0.034397043, + 0.029942181, + -0.04684707, + 0.06177867, + -0.013171469, + -0.06911453, + -0.04349347, + 0.015371565, + -0.01577527, + 0.01773439, + 0.08167559, + -0.002524611, + 0.028078772, + -0.035727963, + 0.011468994, + -0.06786054, + 0.009889452, + -0.0483287, + -0.055014182, + 0.004846103, + 0.042441696, + 0.054850332, + -0.007020451, + 0.028316598, + 0.07431518, + -0.028391074, + -0.050833736, + 0.0032326267, + -0.0005422939, + 0.04113234, + 0.026234375, + 0.053396035, + 0.05735619, + -0.01717059, + -0.028027328, + 0.02691892, + 0.02503625, + 0.062557764, + -0.027271569, + 0.016149832, + 0.0077075553, + 0.012159427, + 0.034784008, + 0.015709192, + 0.038958523, + 0.025529727, + 0.0011087238, + 0.034139954, + -0.041153044, + 7.248747e-05, + -0.013538489, + 0.034983985, + -0.03167844, + 0.006001715, + 0.011474295, + -0.025602113, + 0.041790005, + 
-0.04383271, + -0.03146408, + 0.019360892, + 0.021181574, + -0.03244357, + 0.024868248, + 0.06547852, + 0.054668125, + 0.02574924, + -0.07522572, + 0.024262998, + 0.009693023, + -0.053664465, + -0.014158788, + 0.006301218, + 0.018056067, + -0.01387482, + 0.01243781, + 0.030744387, + -0.004012412, + -0.0046153706, + -0.06561852, + -0.03304356, + -0.04152046, + -0.019557185, + 0.043041006, + 0.03866911, + 0.02212306, + -0.01403974, + 0.047055535, + 0.023601428, + -0.017732145, + -0.0052129487, + 0.019759769, + -0.017544763, + 0.01409893, + 0.0053531453, + 0.02123914, + -0.049547847, + 0.0027636248, + -0.026355125, + 0.04712941, + 0.0746566, + 0.019260941, + -0.017720697, + -0.025329527, + 0.00083697174, + -0.045841433, + -0.004654644, + 0.005010162, + 0.08976771, + 0.06082453, + -0.009662354, + -0.02357495, + -0.036994833, + 0.0038613915, + 0.0023254908, + -0.036620934, + -0.0316217, + -0.011200648, + -0.022778248, + 0.038814247, + -0.008324994, + 0.020946918, + -0.01160711, + -0.016260482, + 0.040330227, + 0.008681942, + -0.04711567, + 0.020017864, + -0.022032628, + -0.05305055, + -0.009351179, + -0.003969348, + -0.012647862, + -0.0841881, + -0.043206286, + 0.00039024177, + -0.027873224, + 0.012539036, + -0.012754074, + 0.006142704, + 0.008921453, + 0.016352238, + -0.01603935, + -0.06305153, + 0.026299356, + -0.018348286, + 0.015741874, + -0.03974086, + -0.024933865, + -0.029023254, + 0.029480303, + 0.043486238, + 0.0028853887, + -0.018682105, + 0.041582398, + 0.042745523, + -0.024219744, + -0.009566694, + -0.024050634, + -0.045929004, + -0.021876726, + 0.01919578, + -0.0043107793, + 0.07144085, + -0.03927294, + 0.029072465, + -0.01242181, + -0.062420227, + -0.02075848, + -0.028836468, + -0.017349612, + 0.008473315, + -0.09169363, + 0.008261454, + 0.0041077463, + -0.024940021, + -0.019034503, + -0.07001702, + 0.07905886, + 0.006459122, + 0.044268638, + -0.018026544, + 0.075073324, + 0.01739723, + 0.0080714105, + -0.0036457728, + -0.0013631854, + -0.010579732, + -0.03356311, + 0.07031985, + 0.049019683, + -0.025012767, + 0.0099630235, + -0.008354231, + 0.06401362, + 0.013553804, + -0.0031617547, + -0.016193528, + -0.009090595, + 0.0038680998, + -0.055363577, + 0.010253973, + -0.055407625, + 0.03389838, + 0.0015454039, + -0.031546198, + -0.0005414776, + -0.026229724, + 0.038999796, + -0.031095231, + -0.019630652, + -0.008376925, + 0.015468112, + -0.03895287, + -0.0070748604, + 0.027532699, + -0.019491317, + 0.04108672, + 0.008161922, + -0.0031511406, + 0.044425853, + -0.017700933, + -0.007980653, + 0.023274345, + 0.046487853, + 0.03471879, + 0.010230327, + 0.0031828017, + 0.006672395, + 0.03605906, + 0.029133542, + 0.0014969306, + 0.035186376, + -0.0063899746, + 0.027218578, + 0.01962848, + 0.003278733, + 0.018850114, + -0.005309846, + -0.006228935, + -0.009798265, + 0.021495217, + 0.021155192, + 0.035909783, + 0.0064114174, + 0.025744593, + -0.06996477, + 0.023757571, + -0.032764025, + 0.046303503, + 0.022086516, + -0.061329205, + -0.0038959188, + -0.020772403, + 0.017466955, + -0.025499884, + 0.033631153, + 0.031748734, + 0.030760456, + 0.07449202, + -0.008631091, + -0.0040144706, + -0.06421018, + -0.014998029, + 0.023082051, + 0.020373309, + 0.014085337, + 0.0047233365, + 0.051186115, + -0.031064488, + -0.060783137, + 0.064631596, + 0.07970026, + -0.0859436, + -0.041633032, + 0.04576333, + 0.022761064, + 0.041172378, + 0.054816168, + -0.0010178451, + 0.054900486, + 0.06938893, + 0.011092356, + 0.023084221, + 0.008477787, + 0.012277583, + -0.061230436, + -0.041977488, + 0.014609203, + 
-0.009039083, + 0.047072906, + 0.0026217499, + 0.002346493, + 0.013807635, + 0.014897043, + 0.017218841, + 0.008167489, + 0.0051184036, + -0.05173226, + 0.02537619, + -0.026887905, + 0.024533851, + -0.026184078, + 4.337919e-06, + -0.019333858, + 0.02483946, + -0.010537213, + -0.01118194, + 0.0036367723, + 0.06956419, + 0.0012046917, + -0.010689593, + -0.0020579803, + 0.04023002, + 0.06398481, + 0.056065474, + 0.022608029, + -0.0626965, + -0.017795788, + -0.01942348, + 0.050164446, + 0.06857079, + -0.03798158, + 0.04222684, + 0.056028176, + 0.021425853, + -0.06262715, + 0.033327498, + -0.0063682394, + 0.05426928, + 0.0071679456, + -0.044264685, + 0.033509832, + -0.08663339, + -0.02044763, + -0.004278769, + -0.016582211, + 0.040397443, + 0.028066564, + -0.04313839, + 0.006021971, + -0.041008733, + -0.017053153, + 0.0012048176, + 0.011767791, + -0.03934562, + 0.021038145, + -0.043585647, + -0.039542057, + 0.039277136, + 0.0036594416, + 0.03957194, + -0.024657233, + -0.018028215, + -0.0684359, + 0.016607657, + -0.0045250803, + 0.027660444, + 0.026975967, + -0.020686872, + 0.0024752545, + 0.0024451965, + 0.04661728, + 0.016602026, + -0.031881746, + -0.035724096, + 0.0144901285, + 0.049197443, + 0.04488291, + -0.003303905, + -0.099433415, + 0.011097523, + 0.00320524, + 0.028129525, + 0.0075848796, + -0.02279956, + 0.04123358, + -0.022186093, + -0.01293531, + -0.034378804, + 0.04033256, + 0.030032586, + -0.07468312, + -0.041661263, + 0.0109480405, + 0.009071749, + 0.12433727, + 0.09973111, + -0.054878768, + -0.03317987, + 0.021019341, + -0.0116514135, + 0.011784185, + 0.037445106, + 0.020518389, + 0.07042429, + -0.02184055, + 0.03269863, + -0.015035146, + -0.028951302, + 0.016295578, + -0.0048200455, + -0.007875158, + 0.04198207, + 0.009505547, + 0.036958206, + -0.01866339, + -0.023273798, + -0.034359016, + 0.008387715, + 0.04231039, + -0.043605886, + -0.07009143, + 0.009971756, + -0.044503756, + 0.025999283, + 0.0024455637, + -0.026667075, + 0.02802616, + -0.012283179, + 0.0133811785, + 0.036217358, + -0.0011184465, + -0.024779204, + -0.036003612, + 0.04252001, + -0.022647075, + 0.0149444295, + 0.023047846, + 0.053789124, + 0.0011415931, + 0.05018589, + 0.030243864, + 0.03817859, + 0.03446338, + -0.016619235, + -0.0038703512, + -2.0666994e-05, + -0.044015624, + 0.0005112809, + -0.0072718635, + 0.03345332, + 0.0014647617, + 0.017212892, + -0.016033418, + -0.010406269, + -0.028657235, + 0.061219696, + -0.055064574, + -0.09664645, + -0.0022612263, + -0.052812897, + -0.030513687, + 0.013788782, + 0.008325146, + 0.09239658, + 0.01875119, + 0.054816615, + 0.0026312424, + -0.017264068, + 0.033101432, + 0.032369398, + -0.0026768087, + 0.044131674, + -0.02088573, + -0.0908362, + 0.046782516, + -0.0058770734, + -0.021163514, + 0.0725615, + 0.06186809, + 0.024326341, + -0.014987368, + -0.026708616, + -0.014812596, + -0.011183411, + -0.028519396, + -0.038318202, + 0.004128375, + -0.026169067, + 0.05174254, + -0.055490565, + -0.024956698, + 0.0032059692, + -0.03628709, + 0.025491342, + -0.02761026, + -0.034416933, + 0.013399064, + 0.011611679, + -0.072546415, + 0.019527245, + -0.06418547, + -0.035796244, + 0.00036897397, + 0.028034288, + -0.053006664, + -0.0018525898, + -0.013585913, + -0.0015293089, + -0.03510647, + 0.028231863, + -0.012119517, + -0.014743964, + 0.008213916, + 0.033391416, + -0.052264515, + -0.017212661, + 0.05579771, + 0.004817519, + 0.006249046, + 0.01783206, + -0.002318341, + 0.020627039, + -0.009174975, + -0.018746354, + 0.011747633, + 0.03141387, + 0.06260081, + -0.012938999, + 
-0.042090695, + 0.027790453, + 0.0047257664, + 0.020296283, + 0.044449627, + -0.012014592, + 0.04040857, + 0.02798724, + -0.015463413, + 0.038524404, + -0.0473671, + -0.024188412, + -0.024593337, + -0.007593123, + -0.014510966, + 0.0028438137, + -0.003239326, + -0.026789932, + -0.029136864, + -0.008876209, + -0.007620919, + -0.0037196758, + 0.014970946, + 0.0030524326, + -0.03568412, + -0.029864434, + -0.004848136, + 0.0067182956, + 0.018654956, + -0.00949501, + -0.0025919783, + 0.009048538, + -0.0182436, + -0.068973206, + 0.024227621, + -0.008147425, + -0.06350101, + 0.047484804, + -0.037748843, + -0.007375619, + -0.04371151, + 0.034315757, + -0.04585421, + 0.025775425, + -0.063119255, + -0.009300389, + -0.020812837, + -0.020029085, + 0.022032183, + 0.06860325, + 0.06424052, + -0.049892932, + 0.014119809, + -0.04557806, + -0.046123583, + -0.06433866, + -0.0063503794, + -0.047135483, + 0.00067991717, + 0.032673378, + 0.05956459, + 0.023172665, + 0.042158186, + -0.05268741, + -0.040922828, + 0.011885759, + 0.030535745, + 0.004635422, + 0.034165785, + 0.014199844, + -0.025018243, + 0.057514813, + 0.08756219, + 0.047963317, + -0.009710153, + -0.023915116, + 0.010460915, + 0.046477184, + -0.04078571, + -0.043531638, + -0.07993793, + 0.004456714, + 0.028488033, + -0.04320458, + 0.009695843, + 0.015289058, + 0.03448123, + -0.023646127, + -0.042910237, + -0.0096746925, + -0.06978396, + 0.026618667, + 0.0291927, + 0.03171987, + 0.016602611, + -0.03240222, + 0.032926932, + 0.05055636, + 0.06262419, + -0.00013886456, + -0.034675006, + -0.00961105, + -0.05237188, + 0.06638755, + -0.0026642946, + 0.028138902, + -0.05798804, + 0.0005645832, + -0.061619475, + -0.03186171, + 0.00937182, + -0.011398456, + 0.012080062, + -0.03316856, + -0.057394188, + -0.03404147, + 0.01295309, + 0.049814716, + -0.012333008, + -0.00506317, + 0.035571773, + 0.024830997, + 0.03291683, + -0.0001456186, + 0.043829933, + -0.033254717, + -0.015285826, + 0.037344154, + 0.011482764, + -0.06270073, + -0.07531468, + 0.029484127, + 0.009518985, + -0.014699304, + 0.07791403, + -0.034256108, + 0.0066609154, + -0.012805655, + 0.023969293, + 0.01172725, + 0.00090381934, + 0.05709565, + 0.026351225, + -0.053378, + 0.021405071, + -0.0025499696, + -0.044654485, + 0.014522269, + -0.032441314, + 0.036319192, + -0.04386052, + -0.040971655, + -0.02020775, + -0.0158068, + -0.0010571782, + -0.017165141, + -1.1923823e-05, + -0.009702131, + -0.02107794, + -0.0011055174, + -0.0006082575, + 0.016337639, + 0.037438143, + -0.019170996, + -0.0035745776, + -0.06409524, + -0.00542057, + -0.039134588, + 0.019707208, + 0.018634733, + 0.0006694254, + 0.012619041, + -0.039410323, + 0.0022495922, + 0.010932078, + 0.014833157, + -0.04761616, + -0.012361174, + -0.0036678137, + 0.07954227, + -0.026129803, + -0.008247221, + -0.018357046, + 0.013871769, + 0.002373308, + -0.010947702, + -0.08565451, + -0.0002473432, + -0.03802552 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/b81284317242.json b/tests/integration/recordings/responses/b81284317242.json new file mode 100644 index 000000000..9d37432ca --- /dev/null +++ b/tests/integration/recordings/responses/b81284317242.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": 
"nomic-embed-text:137m-v1.5-fp16", + "input": [ + "artificial intelligence" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.0022366138, + 0.08461147, + -0.11874114, + -0.0052518453, + 0.07118406, + 0.049483486, + -0.015876217, + -0.0012008038, + -0.0033942908, + 0.05494602, + 0.030520875, + 0.05008958, + 0.09317201, + 0.032156132, + -0.004377338, + -0.03848804, + -0.018956302, + -0.0236095, + 0.022911306, + -0.03110393, + 0.028829137, + -0.016230786, + 0.008753911, + 0.057506666, + 0.10936682, + 0.005825114, + -0.0074997484, + 0.020811856, + 0.010388324, + -0.010141114, + 0.021874895, + -0.019713985, + 0.027533287, + 0.026793962, + -0.044568222, + -0.044519402, + 0.08357342, + 0.012445136, + 0.010518916, + 0.038442865, + -0.030536616, + 0.05906662, + -0.010392797, + -0.022087235, + 0.05343208, + 0.055654023, + -0.0044453666, + -0.036988884, + 0.063930705, + -0.032284323, + 0.032489978, + 0.0055931634, + -0.032375008, + -0.004497235, + 0.09392279, + 0.006754915, + -0.032268003, + 0.00835217, + 0.014370032, + -0.036483698, + 0.08912018, + 0.05955014, + -0.019408967, + 0.06350465, + 0.047744956, + -0.027341131, + 0.006552131, + 0.04953885, + 0.010574868, + 0.02235948, + -0.02321165, + -0.027353264, + 0.038480133, + 0.02281572, + -0.024038436, + -0.001306909, + -0.0061844047, + -0.017209949, + -0.0030420008, + 0.10509315, + 0.042954266, + -0.06901838, + 0.024718743, + -0.024710549, + 0.0343398, + 0.0020979699, + -0.06263484, + -0.029716684, + 0.011262075, + 0.078764975, + 0.033562943, + 0.035133224, + 0.0320457, + 0.00027186406, + -0.036529467, + -0.0016409303, + -0.081980266, + 0.016165322, + -0.0660322, + -0.02935759, + -0.04723506, + 0.025335161, + 0.026269158, + -0.0513352, + 0.045357753, + -0.014988144, + -0.013024993, + -0.03038292, + -0.008367398, + 0.0056260712, + 0.020680085, + 0.028618533, + 0.029874317, + -0.031997733, + -0.00076006126, + -0.034168944, + -0.02590518, + -0.0076284576, + 0.022651166, + 0.018386483, + -0.021787772, + -0.040447697, + 0.0047820276, + -0.009597712, + -0.035957053, + 0.005328606, + -0.057489593, + 0.06073504, + -0.020800686, + -0.029272858, + 0.0163452, + -0.03862363, + -0.02247747, + -0.020445915, + -0.036009513, + 0.059558164, + -0.03033286, + -0.069230184, + 0.033652306, + 0.036894094, + 0.03370458, + 0.027705852, + 0.015187954, + -0.018007543, + -0.01165972, + -0.02008793, + 0.040926944, + 0.021693092, + -0.10439988, + 0.038911153, + -0.0014781221, + 0.035699833, + -0.009698822, + -0.02926835, + -0.0069360486, + 0.014233733, + -0.017313404, + 0.014706464, + 0.0038458246, + -0.022818988, + 0.041648272, + -0.02098679, + -0.027581805, + 0.03756714, + -0.0037085882, + 0.027596122, + 0.04056782, + 0.0034392772, + 0.037615757, + 0.025776071, + -0.026982538, + 0.005852495, + -0.0039863046, + 0.005656856, + 0.06277659, + 0.0043406086, + -0.0297926, + -0.06708285, + 0.050012793, + -0.07488783, + 0.011569169, + -0.0756103, + 0.027647655, + 0.041902207, + -0.022105526, + -0.033318907, + -0.031793807, + -0.015916783, + -0.027008306, + -0.018171852, + 0.006252427, + 0.026597168, + -0.019817233, + -0.040594563, + -0.039668392, + -0.015794825, + 0.029146893, + 0.008342654, + 0.035202503, + -0.008702159, + -0.015769526, + -0.025469974, + -0.0586123, + -0.042902436, + -0.015211353, + 0.014261047, + 0.025996149, + -0.017377071, + 
-0.037808437, + -0.03520045, + 0.07131968, + 0.05654339, + 0.016483534, + -0.01876786, + -0.038460378, + -0.012577459, + 0.0064103696, + -0.062101442, + -0.00660067, + -0.027731637, + 0.06374957, + 0.026982041, + 0.024285842, + -0.018742703, + -0.012524679, + 0.013434072, + -0.055756543, + -0.027415525, + -0.03675257, + 0.017529571, + 0.02477561, + -0.03045127, + 0.06855323, + -0.010209082, + 0.031148888, + 0.021571951, + 0.023731954, + 0.054307498, + 0.03100052, + 0.026400942, + -0.04622913, + 0.04047185, + -0.033045094, + 0.009662064, + -0.047404494, + -0.021189788, + -0.02399669, + -0.055832874, + -0.017241064, + 0.012543915, + -0.008548619, + 0.02192726, + -0.059385594, + 0.014223978, + 0.0034782523, + -0.014986028, + 0.009467993, + 0.025945617, + 0.017788455, + -0.017890496, + 0.037027203, + -0.062437646, + 0.054516815, + 0.0072062453, + 0.036869206, + -0.012679324, + 0.013426369, + 0.0063931644, + 0.013034126, + -0.0054964176, + 0.029703952, + 0.015483862, + 0.037053373, + 0.015184287, + 0.0015051999, + 0.03155224, + -0.034007262, + -0.01062121, + -0.0065257372, + -0.036016863, + -0.02398522, + 0.0002925773, + -0.04639047, + 0.00067234266, + 0.0051879333, + 0.0022854244, + 0.019890914, + 0.055556163, + 0.00015714756, + 0.012443668, + 0.0008963305, + -0.00070220826, + -0.050769955, + -0.017256442, + -0.027077246, + 0.05331934, + 0.034037035, + 0.02592324, + 0.048169997, + -0.008394459, + 0.021370936, + -0.029176475, + 0.043719027, + -0.005602416, + 0.049327727, + -0.016994191, + -0.019547777, + -0.007292355, + 0.022185003, + 0.0021891743, + -0.03477908, + 0.0066157207, + 0.01569508, + 0.0068082223, + 0.0056947717, + 0.0010003493, + -0.044438407, + 0.013787266, + 0.04122305, + 0.028625388, + 0.030242013, + -0.06857352, + -0.06352003, + 0.013763704, + 0.039651092, + 0.07492188, + -0.0053706495, + 0.035465065, + -0.059376698, + -0.06497839, + 0.004327192, + 0.0267945, + 0.015040646, + -0.020788817, + -0.051962562, + -0.01921375, + 0.018850269, + 0.031000722, + -0.018221682, + 0.009267403, + 0.06973425, + -0.025806738, + 0.026600223, + -0.022368405, + -0.040353984, + 0.02531925, + 0.034998856, + 0.013047638, + -0.009365667, + 0.0013648598, + -0.03051494, + 0.03722371, + 0.008678353, + -0.01722393, + 0.019971238, + -0.00760562, + 0.009754185, + 0.08358501, + 0.03864254, + -0.0032530357, + 0.028376041, + -0.038566697, + 0.023307664, + 0.004626837, + -0.011370534, + -0.0077850833, + 0.0050342744, + 0.0030030971, + 0.00605339, + 0.015904339, + 0.022334864, + -0.02215339, + 0.00095908146, + 0.061905097, + -0.008258138, + 0.0005605451, + -0.054997843, + -0.04336385, + -0.019704789, + -0.021770332, + -0.040157095, + 0.03560317, + -0.012980766, + 0.016729578, + 0.040847357, + -0.01233236, + -0.02141919, + -0.06613447, + -0.02145993, + -0.029881824, + -0.012548473, + -0.045113426, + -0.05410633, + -0.050498877, + 0.0017322625, + -0.010467805, + -0.025641298, + -0.045313217, + -0.004778442, + 0.01708526, + -0.034309763, + -0.041960593, + 0.012388626, + -0.039192248, + -0.015190208, + -0.006606051, + -0.01538265, + -0.0532569, + 0.06667949, + 0.028025586, + 0.0058680964, + 0.02157653, + 0.01722739, + -0.08740455, + 0.020562567, + -0.04073606, + 0.031959366, + 0.016461657, + -0.03277063, + 0.009070761, + 0.025736198, + -0.006719338, + 0.026993962, + 0.026991637, + -0.03802627, + 0.015317921, + -0.016529806, + 0.043788806, + -0.006503039, + -0.03839264, + 0.035212778, + -0.029066656, + -0.03686405, + -0.030157154, + -0.022428561, + 0.05858354, + 0.026042566, + 0.03547472, + 0.02563004, + 0.042611666, 
+ 0.019815635, + 0.003058494, + -0.009443615, + -0.034674164, + 0.035445154, + 0.10798093, + 0.038721245, + 0.0016377034, + -0.06430824, + 0.042132918, + 0.010504483, + 0.024581155, + 0.012019827, + 0.030755972, + 0.026534388, + -0.02885229, + -0.019706503, + 0.046450213, + 0.026275348, + 0.04946407, + -0.007464721, + 0.00794922, + -0.08535301, + 0.02541005, + -0.017998746, + -0.009416071, + 0.016700648, + -0.03542828, + 0.027435834, + 0.03758757, + 0.0041925805, + 0.043872304, + 0.011266653, + -0.03867743, + -0.01193984, + 0.0073895175, + -0.044121254, + -0.00873277, + 0.012664631, + 0.035640765, + -0.00072544283, + -0.061218876, + -0.015022522, + -0.0322976, + -0.010083825, + 0.029629998, + -0.03543853, + 0.02555725, + 0.0051406357, + -0.038534507, + 0.040804803, + 0.0036758485, + 0.021139948, + -0.044177193, + -0.05692792, + -0.046873756, + -0.097377434, + 0.040344633, + 0.018246876, + 0.023228467, + -0.0040318235, + -0.0070896745, + -0.040837582, + -0.0021164624, + -0.043553185, + 0.008691869, + 0.043227255, + -0.10591166, + -0.058253914, + 0.07945284, + 0.0055897078, + 0.0023664695, + 0.043260083, + 0.01711786, + 0.009498194, + -0.022812163, + 0.027058931, + 0.005396622, + -0.0931436, + -0.012700624, + 0.050613508, + 0.001651129, + -0.005244997, + -0.005993222, + -0.048681, + 0.013741692, + 0.024419071, + -0.044938207, + 0.024652004, + -0.0090823565, + 0.009084302, + 0.007980511, + -0.03202634, + -0.045257688, + 0.0023523772, + -0.015082915, + -0.04028791, + -0.044669308, + 0.05234696, + 0.02510421, + 0.062450916, + 0.02111679, + 0.006334921, + -0.012903392, + 0.010148576, + -0.038433332, + -0.041481566, + 0.06477058, + -0.006061863, + -0.08530247, + 0.04810012, + -0.048599683, + -0.0005365218, + 0.0040615113, + 0.011245283, + -0.035306197, + -0.008921519, + -0.01795086, + 0.005678066, + -0.032920655, + -0.048789356, + 0.010845612, + 0.03411874, + -0.011378207, + -0.056814976, + -0.006532135, + -0.0050057303, + -0.019771084, + 0.0091395695, + 0.031342167, + 0.023269448, + -0.03736886, + 0.0019668897, + 0.0074416464, + -0.0019287739, + -0.023238849, + 0.0005433489, + -0.024418414, + -0.05959036, + 0.017759146, + 0.048834063, + -0.08515415, + 0.021934256, + 0.030728595, + 0.049638256, + 0.019994117, + -0.04717042, + 0.0015763802, + 0.033468403, + -0.06731834, + -0.00681266, + 0.021093257, + -0.01041348, + -0.055003677, + -0.051734563, + 0.02995711, + -0.02678245, + 0.0045354315, + -0.027154865, + -0.04995867, + -0.0011973461, + -0.033825804, + 0.041500945, + 0.012434426, + 0.020051895, + 0.012731558, + 0.004626874, + 0.047176465, + 0.038083524, + -0.03400733, + 0.011142505, + 0.012283894, + -0.015379302, + 0.007730181, + 0.07565572, + -0.035731222, + 0.08118149, + -0.09431516, + -0.08810903, + 0.01146403, + -0.029304102, + -0.08639211, + 0.0341667, + -0.0052170665, + 0.09311439, + -0.010057816, + 0.021880865, + -0.0047650035, + 0.001162741, + 0.09254362, + -0.038753066, + 0.06454391, + 0.023767488, + -0.030262474, + -0.011110613, + -0.0074149664, + -0.03007684, + 0.020606792, + 0.04930669, + 0.07281914, + -0.0039625484, + -0.0016324545, + -0.03596851, + 0.039473955, + 0.020002823, + -0.0054762294, + 0.040199697, + 0.109564506, + -0.009766631, + -0.040412877, + 0.040181432, + 0.03771873, + 0.013992633, + -0.030444501, + -0.07115155, + 0.042908143, + -0.012742061, + -0.001440587, + 0.012808517, + -0.029983656, + 0.00488665, + 0.006281797, + -0.005707157, + 0.009824824, + 0.037697576, + -0.03704277, + -0.0075235907, + 0.0113789765, + -0.054945026, + -0.04243903, + 0.023500174, + 
-0.011036614, + 0.016815342, + -0.0697076, + 0.008619862, + 0.06272668, + 0.03931336, + 0.016410746, + -0.006864617, + -0.008319184, + -0.009145009, + -0.02897438, + 0.039978817, + -0.033102676, + -0.036361784, + -0.011318566, + 0.03892114, + -0.0075466223, + 0.026960738, + -0.0726453, + -0.014178968, + -0.054352228, + -0.017428732, + 0.0074234335, + -0.006251338, + 0.025898894, + -0.057475954, + 0.018578822, + 0.0290711, + 0.059306774, + -0.009857875, + 0.052424155, + 0.057722762, + 0.039911784, + -0.04026031, + -0.008285909, + -0.0033879017, + 0.029076183, + -0.010721028, + -0.0005562793, + -0.001604114, + 0.030403664, + 0.0042645643, + 0.058851115, + -0.039981343, + -0.027790371, + -0.0327743, + -0.023301579, + -0.021286374, + 0.012392469, + 0.048142795, + -0.049542453, + -0.042852707, + -0.0013391685, + -0.025826424, + 0.008100482, + 0.049525622, + -0.03799743, + 0.012587347, + -0.03135462, + 0.0391294, + -0.02423877, + -0.059276436, + 0.021265157, + -0.009490031, + 0.010039646, + -0.05740955, + -0.043233834, + -0.031231066, + 0.029870564, + 0.019918723, + -0.0030282692, + 0.040403277, + 0.032559145, + 0.0036333718, + -0.035210673, + -0.018083818, + 0.028045155, + 0.026430579, + -0.0024856809, + 0.02103473, + 0.018243128, + -0.042539034, + -0.001484943, + -0.015580981, + 0.05004955, + -0.045361407, + 0.05247213, + 0.0752267, + -0.014999207, + 0.032288983, + -0.06401884, + 0.014476272, + -0.014107892, + -0.03501588, + -0.03343625, + -0.04675748, + 0.013430127 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 2, + "total_tokens": 2 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/b91f1fb4aedb.json b/tests/integration/recordings/responses/b91f1fb4aedb.json deleted file mode 100644 index dccb05cce..000000000 --- a/tests/integration/recordings/responses/b91f1fb4aedb.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. 
San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.\nYou MUST use one of the provided functions/tools to answer the user query.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.232108Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.278231Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "get", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.324826Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.371742Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(location", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.420615Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.467321Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.514894Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " 
Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.562247Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.608002Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " CA", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.656949Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:52.704421Z", - "done": true, - "done_reason": "stop", - "total_duration": 731562041, - "load_duration": 115199875, - "prompt_eval_count": 339, - "prompt_eval_duration": 136000000, - "eval_count": 11, - "eval_duration": 478000000, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/bac8c9e59dda.json b/tests/integration/recordings/responses/bac8c9e59dda.json new file mode 100644 index 000000000..cad2b16c0 --- /dev/null +++ b/tests/integration/recordings/responses/bac8c9e59dda.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "What is Python programming language?" 
+ ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.021546068, + 0.074560724, + -0.08982851, + -0.072915256, + 0.068179905, + 0.025194727, + -0.059721366, + -0.019729408, + -0.026566949, + -0.0814989, + -0.0041806637, + 0.028886959, + 0.040315505, + -0.04661567, + -0.01359174, + -0.10503699, + 0.010832964, + -0.070984155, + -0.010333181, + 0.07324054, + 0.019907007, + -0.041668113, + 0.037937418, + -0.010709144, + 0.12387491, + 0.017573757, + 0.015332567, + -0.017744586, + 0.005326792, + 0.0042512724, + -0.0524661, + 0.0074178437, + 0.0063705305, + -0.024192266, + -0.050366107, + -0.044823464, + 0.06449614, + -0.020831475, + 0.045796607, + 0.03806062, + -0.061222635, + 0.009117029, + 0.06460812, + -0.025770003, + 0.08559993, + -0.04834556, + -0.008501713, + -0.033264425, + -0.051362645, + 0.012586095, + -0.01979581, + -0.050605588, + -0.034403108, + -0.0009926605, + 0.092792325, + 0.03726236, + 0.022629326, + 0.018068956, + 0.0007351709, + -0.04420681, + 0.08045181, + 0.08086262, + -0.08094867, + 0.056096286, + 0.048190814, + -0.04007904, + -0.00068744185, + 0.017544271, + -0.028859643, + -0.0023468533, + 0.03184891, + -0.0701028, + 0.035644103, + -0.0011666699, + -0.03371971, + -0.005051391, + 0.0006552744, + -0.042400498, + 0.026204336, + 0.04615671, + 0.0011726943, + 0.0097871255, + -0.031032644, + 0.029188057, + 0.01711068, + -0.047375336, + -0.038350254, + 0.00039953407, + -0.051105857, + 0.04309587, + -0.06075672, + -0.015162731, + -0.033168647, + -0.011193022, + -0.074920416, + 0.032251537, + -0.050895285, + 0.008220374, + 0.045626145, + -0.008325549, + 0.0011991832, + -0.01571779, + 0.048682336, + -0.053987786, + 0.03146934, + 0.05443348, + 0.038964823, + -0.039737243, + -0.037973408, + -0.0074592913, + -0.0013195083, + 0.046643768, + -0.017327698, + -0.02375174, + -0.04692965, + 0.0009863627, + 0.034537937, + -0.028689977, + 0.057742324, + 0.043029614, + 0.008388772, + -0.02354485, + 0.039006133, + 0.042976316, + -0.031192042, + 0.021574797, + -0.058445938, + 0.013146902, + -0.001762306, + -0.0019140284, + 0.055225994, + -0.016387893, + -0.04440063, + -0.024267718, + -0.032193165, + 0.050777517, + -0.04420101, + -0.020931559, + 0.057991426, + 0.0039969725, + 0.02675994, + 0.019815518, + -0.039617598, + -0.0077555506, + 0.0403523, + -0.015241225, + 0.016795931, + 0.025783498, + 0.0003180923, + 0.024080968, + 0.025404796, + 0.051466335, + -0.0024837458, + 0.022598268, + -0.0063381153, + 0.00178073, + 0.008649395, + 0.012480427, + 0.06648376, + -0.006340787, + 0.09942581, + 0.020740815, + -0.01303556, + 0.028734032, + -0.049742807, + -0.018621337, + 0.019707767, + 0.0024019873, + -0.019140033, + 0.006168636, + -0.022380529, + -0.045453127, + 0.0046049356, + -0.014006226, + 0.0137364585, + 0.018493537, + -0.009292852, + -0.012699987, + 0.03493919, + -0.017692508, + -0.026819916, + -0.04762562, + 0.043674517, + 0.05260871, + -0.071350336, + 0.027072797, + -0.010277009, + -0.049245734, + -0.015018402, + -0.007073371, + -0.03457621, + 0.035879534, + -0.028602535, + -0.06730413, + -0.028733432, + -0.038961537, + -0.0057807537, + 0.00372536, + 0.06245435, + -0.065824784, + -0.04148837, + 0.007765619, + -0.07265677, + 0.0019346873, + -0.062358093, + 0.00810802, + -0.011082361, + 0.018727938, + -0.047425367, + 0.03615319, + 0.08879678, + 0.010909796, + 
-0.012883642, + 0.06262381, + 0.0018163526, + -0.050652664, + -0.020225566, + 0.0011867806, + 0.0032017208, + 0.023490198, + 0.043380897, + -0.011456759, + 0.010590333, + 0.013845344, + 0.021412425, + 0.023646325, + -0.06570232, + 0.00337852, + -0.06377051, + 0.024256472, + 0.001187985, + -0.048088033, + -0.0069261147, + 0.036105778, + 0.028764868, + 0.05908012, + 0.05558998, + 0.036441114, + -0.015726635, + -0.064335406, + -0.025329076, + 0.00019383182, + -0.011378782, + 0.054639373, + -0.0037547597, + 0.011015431, + 0.000934317, + -0.01849728, + -0.030297678, + 0.03176694, + -0.02555499, + -0.06718673, + 0.0020684605, + 0.052554794, + 0.028028563, + 0.03433696, + 0.04029666, + -0.0036450662, + 0.043685105, + -0.024197102, + 0.049198944, + -0.027780259, + -0.0064086183, + 0.007958985, + -0.0011884172, + 0.003618347, + 0.0014725004, + 0.036448352, + 0.0029523035, + -0.034259275, + 0.0105523765, + 0.003530901, + 0.02014434, + -0.043443486, + -0.009125803, + -0.030205054, + 0.018637808, + -0.036032073, + -0.0015491933, + 0.013146738, + 0.030867452, + -0.054905258, + -0.04119182, + 0.03441207, + -0.0119431075, + 0.01545849, + 0.025236556, + 0.008381556, + -0.019275825, + -0.008869993, + 0.057761963, + -0.025082579, + -0.036088195, + -0.03204259, + -0.04041649, + 0.029196605, + 0.045382887, + 0.029454553, + 0.04492332, + -0.016683882, + -0.02644347, + 0.028141662, + 0.05314023, + 0.03233055, + 0.027191106, + -0.027797569, + 0.03171752, + 0.0037958317, + -0.03329865, + -0.020423438, + -0.049809493, + 0.02449613, + -0.03092182, + 0.054525003, + -0.071543515, + 0.058733195, + 0.022018934, + 0.01895145, + 0.026739271, + -0.030747537, + -0.032640383, + -0.098711535, + 0.03642346, + -0.025105536, + 0.015529013, + 0.033251774, + 0.00061906496, + 0.032490347, + 0.018841397, + -0.044984948, + -0.01088912, + -0.0014662399, + 0.000600829, + -0.020325039, + -0.044821136, + -0.008952123, + 0.00048635676, + 0.0002996866, + 0.028668651, + 0.008523237, + 0.01740213, + -0.036633056, + 0.036423907, + -0.02399914, + -0.00761653, + 0.0080245435, + 0.030071083, + -0.058886718, + 0.054297958, + 0.0384154, + 0.018548818, + 0.0436371, + -0.03401102, + 0.003966358, + -0.0090571735, + -0.040655836, + 0.036741752, + -0.021231106, + -0.014417626, + 0.007866179, + 0.0023743121, + -0.021706948, + 0.023308808, + -0.04261524, + -0.013106814, + 0.002184174, + 0.050090536, + -0.037111517, + -0.023020454, + -0.0024899256, + -0.04742312, + -0.051621903, + -0.017614607, + 0.010287463, + -0.016888812, + 0.004063667, + -0.07840794, + -0.013906328, + -0.0200006, + 0.028768701, + 0.0066835126, + -0.0326639, + -0.006753341, + 0.0329794, + 0.0031677445, + -0.05393366, + -0.012149459, + -0.004631686, + 0.050669383, + 0.035566613, + 0.017487023, + -0.035065696, + -0.04345706, + 0.01815283, + 0.046942756, + -0.0049857013, + -0.008515865, + 0.01118123, + -0.02188685, + 0.002976573, + -0.06334929, + -0.06789715, + 0.01847861, + -0.03287031, + -0.028844338, + 0.023312278, + 0.0038410265, + -0.024155468, + 0.03351136, + -0.006541151, + 0.001263295, + -0.0055405344, + 0.016552407, + -0.03261208, + -0.026238086, + 0.04746543, + 0.02347107, + 0.035490252, + -0.060608912, + 0.016866436, + 0.026428545, + 0.026161047, + 0.007885864, + 0.0068620075, + 0.007940054, + 0.0189847, + 0.034563005, + 0.060455717, + -0.0073703714, + -0.07424357, + 0.009194698, + 0.01957624, + 0.03634512, + 0.050949764, + -0.0074621546, + -0.0033942517, + 0.010825065, + 0.015471675, + -0.025703412, + 0.058908764, + 0.04182958, + -0.018113708, + -0.030571556, + 0.0041009923, 
+ 0.017594837, + 0.034117155, + 0.09389374, + -0.022050945, + -0.059975427, + 0.033338364, + 0.0065869745, + 0.026182765, + 0.0017186876, + 0.02232096, + 0.06188853, + 0.048512295, + 0.007636763, + 0.0069405846, + -0.022830538, + 0.035081808, + -0.004960442, + -0.056260712, + -0.042973917, + 0.002066168, + -0.020543572, + -0.014692126, + -0.017611843, + -0.03076786, + -0.015931841, + -0.005772659, + -0.028766898, + 0.04064328, + 0.027844893, + -0.051655486, + -0.015146202, + -0.027285425, + -0.01650888, + 0.024931844, + 0.061224945, + -0.0052609993, + 0.0017036009, + 0.0017101183, + -0.07402718, + -0.0046175467, + -0.0037347435, + 0.027102442, + -0.01231545, + -0.0043430743, + -0.03162171, + -0.041315116, + 0.051363207, + 0.033102125, + 0.078014776, + 0.003990294, + -0.043985523, + -0.031838063, + -0.017765794, + 0.092724755, + 0.10341177, + 0.04103328, + 0.04242992, + 0.009500518, + -0.02362317, + 0.009298321, + 0.037858024, + -0.017323077, + 0.080899306, + -0.015377179, + -0.037678663, + 0.03252487, + 0.055421595, + 0.014384202, + -0.0029980945, + 0.01592118, + 0.04159952, + -0.028906226, + 0.021150941, + -0.02456114, + -0.07065143, + 0.015140283, + -0.012358318, + -0.021758601, + 0.003352868, + -0.020284064, + -0.047894873, + 0.04598992, + 0.03345185, + -0.0009485867, + -0.020016344, + -0.010583383, + 0.051091224, + -0.015766189, + -0.020620693, + -0.015895274, + -0.04726114, + -0.038228642, + -0.04013263, + 0.050451152, + 0.022228183, + -0.0021509614, + 0.06018162, + 0.031637225, + 0.028547807, + 0.008862995, + 0.044033833, + 0.025527734, + -0.032338947, + 0.00135775, + 0.00034528837, + -0.06598875, + 0.07682345, + -0.043039784, + 0.0146461055, + -0.019847354, + 0.008209687, + -0.038366668, + -0.014131546, + -0.030604836, + -0.0004435065, + -0.06457666, + -0.025515914, + 0.008653999, + -0.0116394805, + 0.0008473365, + 0.0153463585, + 0.03973972, + -0.013041565, + -0.024488818, + -0.012756945, + 0.033537187, + -0.035621975, + -0.0119243, + 0.0011147953, + 0.0105046285, + 0.01533771, + 0.026521815, + 0.01678699, + -0.04103264, + -0.06550719, + -0.013783735, + 0.07217273, + -0.046931844, + -0.0030693044, + 0.04330854, + -0.008973219, + 0.0008945983, + 0.01960475, + 0.014526533, + -0.029263442, + 0.011150001, + -0.020033691, + 0.007062613, + -0.025412586, + 0.016623255, + -0.009940003, + 0.031739928, + -0.07282793, + 0.0033635413, + -0.0066056317, + -0.048611987, + -0.010318079, + 0.002579417, + 0.04156733, + -0.017870948, + 0.019536346, + 0.08387811, + -0.019648192, + 0.038054984, + -0.035132788, + -0.017279526, + 0.0383533, + 0.012801995, + -0.018075908, + 0.0130297225, + 0.021892771, + -0.06141125, + 0.029645398, + 0.008496622, + 0.02177819, + -0.019490806, + 0.0006974178, + -0.039861027, + 0.036459584, + -0.03222778, + 0.041180477, + 0.006714091, + -0.03718948, + 0.030249462, + 0.039630912, + 0.06813552, + -0.012209333, + 0.003110101, + -0.059167832, + 0.005225335, + -0.013556482, + -0.0043863617, + -0.047241487, + 0.008726329, + 0.038735278, + 0.048531402, + 0.05609695, + -0.046623323, + -0.0014230527, + -0.002014954, + 0.0005761788, + -0.010059782, + 0.0174383, + 0.06899637, + -0.011378634, + -0.046830196, + 0.0368127, + 0.059148394, + -0.021287646, + 0.016477311, + 0.018321782, + 0.024926422, + 0.046934363, + -0.025329871, + -0.07640391, + -0.006766927, + -0.017800223, + -0.044743028, + -0.03266439, + 0.038117766, + 0.056827657, + 0.05824236, + -0.0018754685, + 0.008698947, + -0.046561655, + -0.03132563, + -0.02317277, + 0.028500559, + 0.0031641317, + -0.029203331, + 0.02452185, + 
0.048750117, + 0.015500057, + -0.016405232, + -0.052083552, + -0.037663985, + 0.03548819, + -0.0006549693, + -0.012240439, + -0.01881079, + 0.0182572, + -0.045353204, + 0.03761795, + -0.03177843, + -0.042186324, + -0.07942117, + -0.032111816, + -0.029888583, + 0.005621708, + -0.042530198, + 0.039356336, + -0.026952052, + -0.018818732, + -0.005272515, + 0.0061625573, + 0.06742063, + 0.022745255, + 0.013821605, + 0.0065215286, + 0.050157912, + -0.039776325, + 0.011725213, + 0.03352152, + 0.042182356, + -0.006891993, + -0.043558784, + -0.033703547, + -0.012222863, + 0.044719968, + 0.049334057, + 0.0061253817, + 0.032853346, + -0.04907138, + -0.062765405, + -0.052750662, + -0.004355708, + 0.0736285, + -0.0034912885, + -0.015804427, + 0.017614808, + -0.028311133, + 0.008187972, + 0.0018999455, + -0.060287938, + 0.013549575, + 0.00073760696, + 0.0059351497, + 0.030927684, + -0.041412465, + 0.031267673, + -0.014439369, + 0.062310357, + -0.019379897, + -0.047648646, + -0.040443134, + 0.015140276, + 0.039490506, + 0.050446603, + -0.0037692762, + 0.045585785, + -0.008795989, + -0.03142311, + -0.024086813, + 0.05972485, + 0.042766098, + -0.034053776, + -0.025232067, + 0.0039050994, + -0.035978347, + 0.094223164, + -0.0074676285, + -0.032635022, + -0.025624894, + 0.08395464, + 0.049035463, + -0.004117194, + 0.008665336, + -0.0086079845, + 0.0062034726, + -0.025399568, + -0.042293865, + 0.0014890308, + -0.034284014, + -0.024277046 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/bbd0637dce16.json b/tests/integration/recordings/responses/bbd0637dce16.json deleted file mode 100644 index b05f5c934..000000000 --- a/tests/integration/recordings/responses/bbd0637dce16.json +++ /dev/null @@ -1,4145 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.073246Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.123061Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.180905Z", - 
"done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "!", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.232132Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.282297Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " City", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.332959Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " by", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.382245Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.43236Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Bay", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.488034Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.560318Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " known", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.609316Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - 
"eval_duration": null, - "response": " for", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.679583Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " its", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.754028Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " unique", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.815078Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.864498Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " often", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.920528Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " unpredictable", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:53.971546Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.028526Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.090548Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "As", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - 
"__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.140592Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.190503Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " check", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.247254Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.296415Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " current", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.357187Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " conditions", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.408666Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.464649Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.517253Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " see", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.580587Z", - "done": false, - "done_reason": null, - "total_duration": 
null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " that", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.634609Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " it", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.689092Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.737491Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " currently", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.799419Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ":\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.852253Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "**", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.914508Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Part", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:54.9647Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "ly", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.014746Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Cloud", - 
"thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.063861Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "y", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.113356Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " with", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.163516Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.220768Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " High", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.285346Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.335656Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.385525Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "58", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.448385Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-08-01T20:56:55.502557Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.554511Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "14", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.608495Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.65582Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ")", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.70258Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.748656Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.793429Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Low", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.840362Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.886535Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": 
null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.932966Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "45", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:55.979079Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.025463Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.071487Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "7", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.118372Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.163759Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ")**", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.208Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.256042Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - 
"__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.30261Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " skies", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.348739Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " are", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.393332Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " mostly", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.440274Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " cloudy", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.487668Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.534721Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " but", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.579311Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " there", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.631181Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.672535Z", - "done": false, - "done_reason": null, - "total_duration": null, 
- "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.720305Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " gentle", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.766504Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " breeze", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.810873Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " blowing", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.85671Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.903626Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " from", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.951644Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:56.997692Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Pacific", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.042867Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Ocean", - 
"thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.090092Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " at", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.13756Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " about", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.185504Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.233795Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "5", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.279091Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " mph", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.324796Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.371362Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.417466Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " sun", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-08-01T20:56:57.462505Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.508191Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " shining", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.554807Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " through", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.601115Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.651194Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " gaps", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.703043Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.752817Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.805119Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " clouds", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.855864Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - 
"prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.918946Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " casting", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:57.971018Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.02062Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " warm", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.068911Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " glow", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.118087Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " over", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.166806Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.212336Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " city", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.259037Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.305923Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "However", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.35316Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.400577Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.445727Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " must", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.493492Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " note", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.540334Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " that", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.587262Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.636491Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.686605Z", - "done": false, - 
"done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.734904Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " famous", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.78326Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " for", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.82962Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " its", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.877323Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " fog", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.925591Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:58.973271Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.020603Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " it", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.068361Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": 
null, - "response": " can", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.116357Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " roll", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.165208Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.214665Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " quickly", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.260891Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.312078Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " especially", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.363408Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.412871Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.45986Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " mornings", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": 
"llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.507267Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.55667Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " evenings", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.604314Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.651999Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " So", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.700667Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.747038Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " if", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.794568Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.845606Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'re", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.895248Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - 
"prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " planning", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.941987Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " outdoor", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:56:59.989983Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " activities", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.038147Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.086828Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " be", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.137594Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " sure", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.19098Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " to", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.241959Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " pack", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.292166Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " layers", - "thinking": null, - 
"context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.339299Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "!\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.387333Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Additionally", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.43431Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.480342Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " there", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.52752Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.57551Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.622747Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " slight", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.672919Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " chance", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-08-01T20:57:00.722642Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.771249Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " scattered", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.819848Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " showers", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.86932Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " later", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.917756Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " this", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:00.969615Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " afternoon", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.021786Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.073794Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " with", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.133868Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - 
"prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.183531Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.234668Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "20", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.284889Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "%", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.333911Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " chance", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.38265Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.434784Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " precipitation", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.48788Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.538129Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Overall", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.587274Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.635903Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " it", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.685825Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.735734Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.78513Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " lovely", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.835305Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " day", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.882976Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " to", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.931504Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " explore", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:01.981052Z", - "done": false, - "done_reason": 
null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.034601Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.089694Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.147879Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " iconic", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.197159Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " landmarks", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.245344Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " like", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.297014Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.346106Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Golden", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.393734Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - 
"eval_duration": null, - "response": " Gate", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.442589Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Bridge", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.491403Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.541047Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Al", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.591264Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "cat", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.639813Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "raz", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.69062Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Island", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.7394Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.78855Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " or", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": 
"llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.837222Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " take", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.886652Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.935063Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " stroll", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:02.984436Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " through", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.034983Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Fish", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.08462Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "erman", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.136737Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.187148Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Wh", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.238025Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, 
- "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "arf", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.287384Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.335964Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Just", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.385297Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " don", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.435051Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'t", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.48456Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " forget", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.533001Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " your", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.586034Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " umbrella", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.637732Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "!\n\n", - "thinking": null, - "context": 
null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.687711Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Would", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.736053Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.785848Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " like", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.83515Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " me", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.885366Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " to", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.935525Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " check", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:03.988044Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.039953Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-08-01T20:57:04.088637Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " forecast", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.136695Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " for", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.186737Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.235917Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " specific", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.282422Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " date", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.329468Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " or", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.378301Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " location", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.427438Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "?", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-08-01T20:57:04.475807Z", - "done": true, - "done_reason": "stop", - "total_duration": 11588890291, - "load_duration": 85257500, - "prompt_eval_count": 34, 
- "prompt_eval_duration": 95000000, - "eval_count": 229, - "eval_duration": 11407000000, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/bc581d1d19f9.json b/tests/integration/recordings/responses/bc581d1d19f9.json new file mode 100644 index 000000000..51e870ed5 --- /dev/null +++ b/tests/integration/recordings/responses/bc581d1d19f9.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "How do systems learn automatically?" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + -0.00428149, + 0.02407125, + -0.1332138, + 0.0049487473, + 0.073026754, + -0.0033538076, + 0.04288422, + -0.033756636, + -0.020148698, + -0.029086374, + -0.026594821, + 0.0491011, + 0.11988463, + 0.07824526, + 0.0070956615, + -0.012669163, + 0.008139979, + -0.04938827, + 0.013677458, + 0.027183838, + 0.034600288, + -0.031530242, + -0.0016821623, + 0.019251885, + 0.08406186, + 0.05699986, + -0.021502802, + -0.04496157, + 0.0106643615, + 0.008963991, + 0.020009708, + -0.01691365, + 0.020409556, + -0.03680993, + -0.040421132, + -0.043416277, + 0.03750667, + -0.041974973, + -0.0014707688, + 0.036682874, + -0.0418393, + -0.0025643362, + 0.033818632, + 0.004418005, + 0.029838623, + -0.009352448, + 0.008466692, + -0.018111689, + 0.01584755, + 0.013171241, + 0.061980456, + -0.069145404, + -0.008550795, + 0.03166987, + 0.07030618, + 0.050118607, + 0.0077106315, + 0.051082145, + 0.0076379525, + -0.12136735, + 0.0949581, + 0.047785405, + -0.024135714, + 0.03949768, + -0.00998136, + 0.009925407, + 0.0024552627, + 0.074248135, + -0.020262156, + 0.025166985, + 0.043061364, + -0.00020012973, + -0.0013722081, + -0.036943354, + 0.00038265405, + -0.019521076, + -0.00899439, + -0.030687673, + -0.021156238, + 0.08929159, + 0.076894514, + -0.044162292, + 0.044842854, + -0.04710164, + 0.047927003, + 0.043319575, + -0.025170114, + -0.050350837, + -0.049965464, + 0.106085554, + 0.0105728125, + 0.028446438, + 0.012516686, + 0.02272991, + -0.0699857, + 0.0090155825, + -0.047980662, + 0.026107809, + -0.015327817, + -0.024888223, + -0.048073135, + -0.021106714, + -0.035433546, + -0.06532197, + 0.046712816, + 0.05556861, + 0.026862264, + -0.016994625, + -0.018469553, + 0.022816217, + -0.004126572, + 0.0112463245, + -0.041334957, + 0.013304708, + -0.040029723, + -0.023817563, + 0.031692363, + -0.03722668, + -0.0014856787, + 0.0038255276, + -0.04752098, + -0.02851394, + -0.061403427, + 0.008843585, + 0.017438399, + 0.07924388, + -0.022398552, + -0.023760876, + 0.012586873, + 0.00013913387, + -0.017331297, + -0.023813803, + -0.05011878, + -0.03890656, + 0.04468097, + 0.064255364, + -0.008867073, + -0.048514213, + 0.039790582, + 0.026003322, + 0.027585011, + 0.050736748, + -0.0406184, + 0.0036706005, + 0.011977381, + -0.027149582, + 0.0045547825, + -0.019476876, + -0.024368003, + -0.012050432, + -0.020125346, + 0.064718515, + -0.04762536, + -0.016224585, + 0.030977147, + 0.008130414, + 0.0003577489, + -0.009716708, + 0.047520906, + -0.023345266, + 0.07156089, + 0.00560899, + -0.059684724, + 0.009787788, + -0.039778, + -0.047962077, + 0.0151202, + 0.021638919, + 0.009691277, + 
0.011461687, + -0.058961295, + -0.0021215482, + -0.020346558, + 0.031748556, + 0.01978428, + 0.04272435, + 0.059866656, + -0.028556414, + 0.053447437, + -0.050291624, + 0.043037664, + -0.05916949, + 0.006200961, + 0.032881115, + 0.029740918, + 0.04163254, + -0.07064391, + 0.017124165, + -0.026459662, + -0.017939264, + -0.0049217865, + 0.004892696, + -0.02395917, + -0.039323617, + -0.04584698, + -0.01582084, + 0.0040600323, + 0.021148082, + 0.045447603, + -0.0034679722, + -0.0022344757, + -0.013239739, + -0.056449797, + -0.013114313, + -0.03516612, + 0.04855227, + -0.022413462, + -0.023173615, + -0.05311571, + 0.050527163, + 0.10950742, + 0.025504153, + -0.07088534, + -0.013840008, + 0.014794675, + -0.048666134, + -0.004081256, + 0.03079063, + 0.03826126, + -0.004722943, + -0.037695494, + -0.0012323718, + 0.011781598, + -0.0008649358, + 0.009486067, + -0.047584575, + -0.032011673, + -0.0071835704, + -0.026329862, + 0.0610994, + 0.005951907, + -0.05746216, + 0.049042497, + 0.01942778, + 0.02466324, + 0.037137028, + -0.005733832, + 0.0050964127, + 0.011975964, + 0.01827365, + 0.0364417, + 0.0054482464, + 0.017727714, + 0.026096473, + -0.03864051, + -0.027607258, + 0.064083986, + -0.021064874, + -0.07236599, + -0.009461691, + -0.004503321, + 0.07727144, + -0.021993937, + -0.041066013, + 0.007837953, + -0.012733127, + -0.023929356, + 0.024026997, + 0.029644636, + -0.03580834, + 0.049579863, + -0.008306231, + 0.0033716194, + 0.023994723, + 0.0016040959, + -0.06757932, + -0.01725457, + -0.0018347696, + -0.014079332, + -0.037564423, + 0.0021168434, + 0.022626605, + 0.017065872, + 0.028187625, + -0.017432727, + -0.00060995156, + -0.0050884592, + -0.026294366, + -0.005138151, + 0.024878688, + -0.047285795, + -0.05343155, + -0.05923142, + -0.048198592, + 0.029171238, + -0.014015087, + 0.034630585, + 0.017745048, + 0.004982567, + -0.029875325, + 0.016022105, + -0.011249133, + -0.022620039, + 0.050667416, + -0.055142168, + 0.053712547, + 0.05209018, + -0.0030329423, + -0.03460956, + -0.008600882, + 0.03018812, + 0.03301259, + 0.055056907, + 0.016398128, + -0.051274415, + -0.012549744, + -0.0131849535, + -0.020003958, + 0.021637436, + 0.0044468357, + -0.016667124, + -0.014434915, + -0.020033175, + 0.011097635, + -0.0104253795, + 0.040533286, + -0.0003543454, + 0.018132562, + 0.016767971, + -0.02853769, + -0.03855733, + -0.051239323, + -0.03282561, + -0.022864738, + -0.020809682, + 0.0331824, + -0.03188178, + -0.029670365, + -0.014644772, + -0.032294247, + 0.052761924, + 0.020352883, + -0.04178145, + -0.025883485, + -0.009779321, + -0.035340283, + -4.3197328e-05, + 0.014557154, + -0.026777798, + 0.03430408, + -0.013001561, + -0.0180639, + -0.017124854, + -0.012680865, + -0.033448033, + 0.006832241, + 0.018108014, + -0.029847402, + 0.029681118, + -0.0019150219, + 0.010268849, + 0.02234804, + -0.044627994, + 0.014515216, + -0.024069967, + 0.040975504, + 0.018334284, + 0.06858303, + 0.031183977, + -0.018035553, + 0.0012376573, + -0.040480535, + 0.011860962, + 0.008761476, + 0.013253703, + 0.048430983, + 0.024999872, + 0.003414671, + 0.036289666, + 0.005700741, + -0.037498105, + 0.007829068, + -0.031861316, + 0.04227996, + 0.026684696, + -0.020258412, + -0.04468171, + 0.02324706, + 0.011862285, + -0.0061922455, + -0.008237774, + -0.0097581735, + 0.011954634, + -0.044554517, + 0.064815395, + 0.034289274, + 0.021234674, + -0.006408982, + -0.0070845615, + 0.09382454, + 0.048409455, + -0.05691485, + -0.026065106, + 0.010707884, + 0.0017449469, + -0.0078919, + 0.030506298, + 0.01389418, + 0.008356455, + 
0.012116216, + -0.044730872, + -0.04150543, + -0.013844061, + -0.0045930077, + 0.0221899, + 0.03366275, + -0.03881418, + -0.044890568, + -0.00854704, + 0.01113163, + 0.056899447, + 0.0049619614, + -0.009287256, + -0.04973473, + -0.002274902, + -0.010802974, + 0.019276256, + 0.051969297, + -0.062228583, + -0.015458839, + 0.0016319213, + 0.011429133, + 0.037918244, + -0.004828408, + -0.035008963, + 0.017727211, + -0.0029278435, + 0.029832216, + 0.025300818, + -0.085215725, + 0.028157715, + -0.037113056, + 0.022304408, + -0.016299961, + -0.037999555, + -0.004712907, + 0.046835583, + 0.055619333, + 3.6547885e-05, + 0.05205659, + 0.047921646, + 0.008702412, + -0.05138415, + -0.020239344, + 0.039232746, + 0.06896306, + 0.058982562, + 0.03473404, + -0.056870822, + 0.024006031, + -0.013754174, + 0.024787294, + 0.05111505, + 0.0111331595, + 0.07829041, + -0.05210541, + -0.08635686, + 0.0026925444, + 0.028652523, + 0.0054272353, + 0.022821547, + -0.038695633, + -0.064750284, + 0.03735705, + -0.035864174, + -0.019625148, + 0.019032817, + -0.015487316, + 0.010431493, + 0.060512472, + -0.023324054, + 0.02824, + 0.04017302, + 0.024951972, + -0.026328666, + -0.057480592, + -0.027944664, + -0.027240178, + 0.10017138, + 0.055556547, + 0.005724635, + -0.0664801, + -0.037868008, + -0.0064106854, + -0.031640884, + 0.05590782, + -0.018710261, + 0.009431387, + 0.032639552, + -0.025173835, + 0.032886345, + 0.03646426, + 0.0029133258, + -0.041243024, + -0.07930791, + -0.075010434, + -0.074865736, + -0.006846306, + 0.045394387, + -0.0069568427, + -0.02888041, + 0.055638384, + -0.004655212, + 0.021350808, + 0.027616587, + -0.02519815, + 0.050839994, + -0.058958888, + -0.06744275, + 0.06294673, + 0.017970167, + 0.03081954, + 0.039258115, + 0.030206023, + 0.037268274, + -0.12227476, + -0.027840136, + 0.031151181, + -0.02353207, + -0.0045231637, + -0.0029906975, + 0.038490243, + -0.035881314, + 0.0012044089, + -0.06954653, + -0.001324146, + -0.008361788, + -0.01764601, + 0.011135384, + 0.009530937, + 0.07548827, + 0.026028562, + -0.0050113667, + 0.046487052, + 0.010139422, + 0.013521331, + 0.016400773, + 0.044519138, + 0.010799146, + 0.033334833, + 0.02863783, + -0.0137955565, + 0.013563769, + -0.01717276, + 0.026185095, + -0.018329982, + 0.015020572, + 0.009428841, + 0.0706339, + -0.036201842, + -0.027024077, + -0.019520734, + -0.008670405, + -0.024960307, + -0.026179617, + 0.026087483, + -0.05252428, + -0.0229573, + -0.035547692, + -0.01852853, + 0.043040182, + 0.0037711465, + 0.08104828, + -0.0009224388, + -0.031166729, + 0.016368993, + 0.008481886, + 0.014682696, + 0.06879207, + 0.07771774, + 0.034957133, + -0.04902316, + -0.0067222845, + -0.0150945, + -0.011978907, + -0.019786322, + -0.031629253, + 0.007955772, + 0.0036231026, + -0.046276536, + 0.01276116, + -0.052814208, + 0.036858033, + -0.016896809, + 0.011148679, + -0.009529029, + -0.022465233, + -0.004244614, + 0.008439518, + -0.005623781, + -0.028603744, + -0.034281965, + -0.010800054, + -0.032598462, + -0.025653053, + 0.038314216, + -0.0288694, + 0.0009420499, + 0.035861664, + -0.00015698255, + -0.057694875, + -0.00212551, + 0.0697879, + -0.07035993, + -0.015376516, + 0.1053229, + -0.0030419535, + 0.056434374, + 0.034484025, + -0.003987501, + -0.037906058, + 0.022804463, + -0.00015382255, + 0.012649136, + 0.041817613, + -0.0030757599, + 0.03920111, + -0.008302305, + -0.022637676, + 0.011213054, + -0.03463392, + -0.062593475, + 0.04490034, + -0.049543373, + 0.03427962, + -0.012201502, + -0.03728584, + -0.024322258, + 0.057880796, + 0.028249184, + 
-0.020159418, + 0.029815175, + -0.070027076, + -0.034782086, + -0.009831017, + 0.04126681, + 0.0102781225, + 0.0045355903, + 0.0022249392, + 0.021429095, + 0.029994996, + -0.028526725, + -0.02694864, + 0.020876277, + 0.051576857, + -0.02663821, + 0.007916328, + 0.031338222, + 0.0011062028, + -0.021790367, + 0.04348595, + 0.04889843, + 0.043898094, + 0.015051696, + -0.0031638998, + 0.027447224, + 0.004035756, + -0.02270146, + 0.009923461, + 0.0071001905, + -0.0024750312, + -0.004354693, + -0.011137099, + 0.022133583, + 0.007143121, + -0.006542333, + -0.0035875533, + -0.03104829, + -0.023976129, + -0.034237478, + 0.00353826, + 0.046956386, + 0.047808655, + -0.009622124, + -0.019816758, + 0.036042444, + 0.0074496916, + 0.015117541, + -0.0069881775, + -0.020962749, + -0.027847344, + -0.0110671045, + 0.051426794, + -0.011348545, + -0.017289529, + -0.017414175, + 0.0044310116, + 0.00334495, + -0.02571939, + -0.08204306, + -0.03615147, + -0.04363827, + -0.018072678, + 0.0042690565, + -0.023174929, + 0.001252396, + 0.029551307, + 0.019155787, + 0.027948458, + 0.025480693, + -0.010069296, + 0.017918479, + -0.02440271, + 0.045908872, + 0.018629733, + -0.028871888, + 0.0032536213, + -0.012329758, + -0.033727482, + -0.021467274, + -0.03815194, + -0.033245903, + -0.034001675, + 0.01439367, + -0.025495326, + -0.0057980763, + 0.013447159, + -0.0061734873, + -0.03993734, + 0.04075683, + -0.020366007, + 0.0036329266, + -0.048996653, + -0.008861363, + -0.012075161, + 0.02958152, + 0.04170489, + -0.11561458, + 0.00078936014, + 0.014332291, + -0.03146352, + -0.015674343, + -0.014992681, + 0.009472547, + -0.0041671344, + -0.021322032, + -0.0016242207, + -0.03700226, + -0.11647651, + -0.006232428, + -0.031109286, + 0.014464355, + 0.034407333, + 0.024211535, + 0.06314624, + -0.01320869, + -0.0028783486, + 0.08477521, + 0.026424106, + -0.04939683, + -0.035553195, + -0.012495481, + -0.016439108, + -0.010666291, + -0.012672077, + 0.0020947906, + -0.024717389, + 0.0035311815, + 0.07439823, + 0.035552412, + -0.019250356, + -0.014858424, + 0.007450147, + -0.054126002, + 0.0117400475, + -0.0292314, + -0.020184005, + -0.010763533 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 6, + "total_tokens": 6 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/bd356b27a085.json b/tests/integration/recordings/responses/bd356b27a085.json deleted file mode 100644 index f372e5af9..000000000 --- a/tests/integration/recordings/responses/bd356b27a085.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. 
besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.916043Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "How", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.957379Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " can", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.00029Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.043332Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " assist", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.085324Z", - "done": false, 
- "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.128181Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " further", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.172026Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "?", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:23.216706Z", - "done": true, - "done_reason": "stop", - "total_duration": 516060000, - "load_duration": 127260334, - "prompt_eval_count": 479, - "prompt_eval_duration": 87107292, - "eval_count": 8, - "eval_duration": 299381042, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/bd656a9e3f8f.json b/tests/integration/recordings/responses/bd656a9e3f8f.json new file mode 100644 index 000000000..35a201532 --- /dev/null +++ b/tests/integration/recordings/responses/bd656a9e3f8f.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "What makes Python different from other languages?" 
+ ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.0046769786, + 0.083690464, + -0.11982049, + -0.050078377, + 0.07618569, + 0.055943117, + -0.06147888, + -0.006356616, + -0.02980319, + -0.04645953, + -0.020679861, + 0.04556243, + 0.057300676, + -0.0035848457, + 0.0230642, + -0.09632374, + 0.026833246, + -0.06233201, + 0.020290313, + 0.10720468, + -0.024168964, + -0.0012473708, + 0.004914762, + -0.02155512, + 0.08849714, + -0.007135749, + -0.0038326771, + 0.0069581103, + -0.0074268873, + 0.013409611, + 0.010099577, + -0.025109533, + -0.003233865, + -0.007914921, + -0.020222431, + -0.03304812, + 0.056438155, + -0.02873586, + 0.023246638, + 0.06580444, + -0.017076816, + 0.032818917, + 0.033706866, + 0.027439306, + 0.08495476, + -0.059326306, + -0.028659344, + -0.009344298, + -0.00028624074, + -0.022933884, + -0.00515618, + -0.049101423, + -0.05928526, + -0.023545984, + 0.081459105, + 0.021571912, + -0.016101, + 0.040869456, + 0.056534253, + -0.030151509, + 0.009962059, + 0.036012027, + -0.07711307, + 0.08302933, + 0.0227325, + -0.02606058, + 0.009178087, + 0.053695664, + -0.038264044, + 0.0068369326, + 0.0065288646, + -0.0552765, + 0.03865418, + -0.01567221, + -0.060309917, + 0.0010711496, + -0.047535334, + -0.030803464, + 0.0045822156, + 0.07728093, + -0.011466593, + 0.054215208, + -0.021875659, + 0.023540711, + 0.01867942, + -0.017167076, + 0.019128326, + 0.008091631, + -0.03849017, + 0.04898976, + -0.028525505, + -0.065653615, + 0.027817613, + 0.03276224, + -0.09881923, + 0.04162109, + -0.032707293, + 0.047908768, + 0.015856905, + -0.023583382, + 0.031512305, + 0.014515255, + 0.041903667, + -0.046402343, + 0.045323893, + 0.018747462, + -0.0013544654, + -0.019731803, + -0.06693634, + -0.023983508, + 0.01199707, + 0.051562272, + -0.04148846, + -0.02059173, + -0.0023412316, + -0.013479597, + 0.03306875, + -0.024780301, + 0.04983078, + 0.0022185023, + -0.0014982268, + -0.038073156, + -0.025834907, + 0.007876299, + -0.019942068, + 0.02281191, + 0.008688617, + -0.0060313637, + 0.043387514, + -0.040785804, + 0.05154224, + -0.005883679, + -0.049592912, + 0.0010802841, + -0.008244391, + 0.0059353155, + -0.03393454, + -0.025106676, + 0.0619323, + 0.0072672744, + 0.03592506, + 0.020506766, + -0.025028136, + -0.034375858, + 0.025218893, + -0.035614785, + 0.015943734, + 0.02356935, + -0.034355003, + 0.042679872, + 0.018376308, + 0.04828793, + 0.013157428, + 0.082592666, + -0.0032569305, + 0.0036007413, + 0.0014685044, + 0.026219074, + 0.033264782, + -0.017953578, + 0.06869738, + -0.038852017, + 0.0011227716, + 0.061297636, + -0.018883126, + -0.025346823, + 0.023695529, + 0.016965017, + -0.027433833, + -0.018658942, + -0.038259037, + -0.0201669, + -0.010763363, + -0.017361904, + 0.0027696996, + 0.032333463, + -0.0059774434, + -0.057706878, + 0.053628284, + -0.01144307, + -0.029257657, + -0.056920953, + 0.033485316, + 0.013542015, + -0.018080134, + 0.043140866, + -0.0034580003, + -0.037477978, + -0.058190405, + -0.035952277, + -0.0014575764, + 0.023698332, + -0.052652635, + -0.06774504, + -0.04264479, + -0.038268574, + -0.03422374, + -0.02019695, + -0.0007224252, + -0.05120822, + -0.09243153, + 0.017078334, + -0.055175755, + -0.027441327, + -0.0548805, + 0.00024373078, + -0.056404747, + 0.01639788, + -0.008110089, + 0.017016128, + 0.06111775, + -0.019643141, + -0.028601874, + 
0.017119596, + 0.007050336, + -0.03558983, + 0.019803075, + 0.0048035244, + 0.025111655, + 0.023278559, + 0.042801682, + -0.024930278, + -0.002696923, + 0.0003183538, + 0.022027316, + 0.0038433624, + -0.04479033, + 0.0047468934, + -0.044116203, + 0.03062775, + -0.019926922, + -0.08737841, + 0.046494182, + 0.036260393, + 0.006753454, + 0.03020523, + 0.080529645, + 0.033337522, + 0.0046576452, + -0.041016728, + -0.005623168, + -0.045591753, + -0.02996265, + 0.051140346, + -0.019263566, + -0.016980316, + -0.01215931, + -0.010660377, + -0.039426908, + 0.024758589, + -0.06272833, + -0.00047994126, + -0.019837916, + 0.053189985, + 0.018557988, + -0.0043275678, + 0.029666577, + -0.01110632, + 0.04881236, + -0.007268525, + 0.002341546, + -0.030267036, + -0.017919833, + 0.017845307, + -0.016560584, + 0.030018363, + -0.022505458, + 0.01932259, + -0.012229639, + -0.042308196, + -0.016230695, + 0.04054133, + 0.0012926994, + -0.01997304, + -0.03386475, + 0.011195352, + 0.050117347, + -0.030581629, + 0.003925074, + 0.0113576995, + -0.012875149, + -0.018951226, + -0.06956738, + 0.001481844, + 0.0062846313, + 0.042127434, + 0.037737373, + -0.015525513, + -0.01635555, + -0.0196644, + 0.0549525, + 0.0015289227, + -0.033364024, + -0.01210342, + 0.027240155, + 0.0204516, + 0.01342817, + 0.013682366, + 0.015533677, + -0.028971234, + 0.0049345517, + 0.025192147, + 0.071041234, + 0.07579864, + 0.04159731, + -0.03599089, + 0.023011135, + -0.022844052, + 0.034056503, + 0.00611017, + -0.008533525, + 0.006296338, + -0.025676649, + 0.054880682, + -0.055116627, + 0.07243938, + 0.014162865, + 0.030842772, + 0.04110178, + -0.007569799, + -0.0627285, + -0.09811596, + 0.013354445, + -0.035387635, + 0.012455037, + 0.023508446, + -0.01517758, + 0.031200051, + -0.038080446, + -0.023632461, + -0.01313721, + 0.044724084, + 0.01079242, + -0.042577203, + -0.093014725, + 0.021853799, + 0.017237827, + 0.00835688, + 0.038274225, + -0.003030852, + 0.033847835, + -0.0098942295, + 0.022144467, + -0.012889256, + -0.05197047, + -0.033751793, + 0.014369912, + -0.0348941, + 0.03833189, + 0.05389039, + -0.019246621, + 0.029542712, + -0.0066530085, + 0.012444892, + 0.008934373, + -0.038265448, + 0.014598134, + 0.005870603, + -0.024180869, + -0.0013095264, + 0.07556661, + -0.023697974, + 0.015573299, + -0.04490378, + -0.021133035, + 0.029217301, + 0.03514109, + -0.036599603, + -0.01649445, + -0.035163913, + -0.06490779, + 0.00017416089, + -0.03385753, + -0.0057173762, + 0.022871815, + 0.0011777632, + -0.05306106, + 0.01771125, + -0.032820936, + 0.023362804, + 0.0029813135, + -0.04775915, + -0.035883203, + -0.0013802864, + 0.018004319, + -0.06613522, + -0.026787223, + 0.015061619, + 0.0048732595, + 0.011704616, + 0.0068861824, + -0.034187183, + -0.03897478, + 0.043694627, + 0.048718087, + -0.016888587, + 0.066222705, + 0.007551523, + -0.0071170144, + 0.013470767, + -0.09279557, + -0.073159575, + 0.022802284, + -0.06531729, + -0.017087476, + -0.0062160357, + 0.025067216, + -0.0141074145, + 0.027660044, + -0.019831946, + -0.014867193, + 0.013818542, + 0.021023916, + -0.012632161, + -0.04154114, + 0.023770317, + 0.032076716, + 0.039769586, + -0.050506808, + -0.034958333, + 0.019621266, + 0.03992471, + -0.01429077, + 0.006854892, + 0.04805887, + 0.0347616, + -0.00159377, + 0.046118367, + -0.008223981, + -0.063480705, + 0.049171273, + 0.045540314, + 0.041054647, + -0.0044349367, + -0.00057917647, + -0.011215353, + 0.020706484, + 0.020172067, + 0.0001999814, + 0.07558801, + 0.056141127, + 0.0021616986, + -0.06750322, + -0.03253715, + 0.03148045, + 
0.07361791, + 0.048109554, + 0.0015175714, + -0.08388102, + 0.052223753, + -0.021618556, + 0.0011163169, + 0.03180002, + 0.014868306, + 0.07418754, + -0.001809872, + 0.007974625, + -0.019393556, + -0.0064754495, + 0.0058915988, + 0.007833064, + -0.029894123, + -0.03208613, + 0.015242572, + -0.007863448, + 0.011586947, + -0.011296612, + 0.019095654, + 0.011060441, + 0.036481753, + -0.021954166, + 0.043565758, + 0.026696721, + -0.015212072, + -0.01388709, + -0.005076162, + -0.004764351, + 0.02277143, + 0.015940938, + -0.012273592, + -0.0113236215, + -0.009349015, + -0.023159903, + 0.034299444, + 0.0051811906, + 0.02457953, + -0.00336759, + -0.010487071, + 0.0027819932, + -0.0166476, + 0.051722072, + 0.01953157, + 0.042633582, + -0.0075797215, + -0.0037860046, + -0.0019558403, + 0.02796527, + 0.07925882, + 0.08442935, + 0.03597555, + 0.035355035, + 0.04274225, + -0.028919257, + -0.01390327, + 0.05817449, + -0.01081491, + 0.08801434, + -0.01752534, + -0.012958594, + 0.015158736, + 0.022571595, + -0.031161658, + -0.01663387, + 0.03960943, + 0.070396766, + -0.019201908, + 0.017662441, + -0.01813925, + -0.04914818, + -0.022708714, + 0.003170524, + -0.05194188, + 0.018866621, + -0.047192633, + -0.031068562, + 0.015747234, + 0.021172306, + -0.043017026, + -0.04114877, + -0.008187472, + 0.03578638, + 0.0014854743, + -0.0091289375, + 0.030439813, + -0.006482316, + -0.048376027, + -0.048143737, + 0.05094739, + 0.0019916256, + -0.019090299, + 0.09083704, + -0.011921242, + 0.01555412, + 0.014025174, + 0.03928094, + 0.016697882, + 0.008364265, + -0.0044548362, + -0.021938786, + -0.049410958, + 0.057301793, + -0.012661886, + 0.014062223, + 0.0046853907, + 0.008254278, + -0.043336876, + 0.0006073866, + -0.0042262096, + -0.02371089, + -0.050750397, + -0.007564976, + 0.010089996, + 0.02333583, + -0.0052094185, + 0.03494318, + -0.0021578325, + -0.036945812, + 0.013057502, + -0.01541567, + 0.023513883, + -0.03691971, + -0.017823482, + 0.025533495, + 0.0035812904, + 0.008482279, + -0.0016294529, + -0.027481427, + -0.028350944, + -0.04687361, + -0.0009943155, + 0.014044526, + -0.030604992, + -0.0043712286, + 0.028413586, + -0.024108026, + -0.005640293, + 0.0015994613, + 0.0014173193, + 0.013369295, + -0.02437893, + -0.013210499, + -0.017440163, + 0.020522058, + -0.018700741, + 0.0011646106, + 0.0008340312, + -0.10092263, + -0.02366156, + -0.013975101, + -0.05893237, + 0.034923963, + 0.016745148, + 0.07198604, + -0.010349937, + 0.0020174542, + 0.10199023, + -0.020444227, + 0.03846847, + 0.00402589, + -0.016277963, + 0.038777675, + 0.027252568, + -0.017871046, + 0.002508591, + 0.0016636356, + -0.081348985, + 0.01521606, + 0.026763946, + -0.0026202078, + -0.021634903, + 0.019835912, + -0.056225803, + -0.009446153, + -0.04976095, + 0.07484465, + -0.0064382763, + -0.10152314, + 0.02162658, + 0.0162603, + 0.034870964, + -0.019684168, + 0.038379937, + -0.07608127, + 0.01170732, + -0.024826946, + 0.0028120677, + -0.044688802, + 0.00983268, + 0.0083624115, + 0.029636618, + 0.03864257, + -0.032289203, + 0.032004982, + -0.01724803, + 0.05689035, + 0.025517073, + 0.049366903, + 0.036741164, + -0.020827103, + -0.02858439, + 0.039771907, + 0.06253526, + 0.009690641, + 0.016788358, + 0.03696011, + 0.024056204, + 0.04996488, + -0.029877296, + -0.05051683, + -0.005531692, + -0.016483683, + -0.013373561, + -0.045278877, + 0.07791228, + 0.06894905, + 0.025117228, + -0.029928861, + -0.0034376658, + -0.06184184, + 0.009840523, + 0.0073680477, + -0.012487849, + -0.0033177931, + -0.03780593, + 0.030924184, + 0.03155251, + 0.012302111, 
+ -0.0058943485, + -0.0511734, + 0.002576594, + 0.034169413, + -0.0012890521, + -0.0011859316, + 0.0019937826, + -0.012383855, + -0.03501946, + 0.015286534, + -0.035822354, + -0.024596563, + -0.0588515, + -0.0075659747, + -0.04447766, + -0.0053720693, + 0.026699372, + 0.0029689881, + -0.011552407, + 0.0004428281, + -0.0026276393, + -0.0118419165, + 0.03530749, + 0.041233983, + 0.009662047, + 0.006017802, + 0.020814791, + -0.011202684, + 0.010287828, + 0.018114299, + 0.03387944, + -0.018922666, + -0.019546792, + 0.014142722, + 0.024568362, + 0.04800171, + 0.039308336, + 0.036034845, + 2.7852648e-06, + -0.048231635, + -0.084290236, + -0.06439334, + -0.007185233, + 0.06345774, + -0.04148515, + -0.053612724, + -0.028786143, + 0.014472016, + -0.022519154, + 0.019259013, + -0.064776696, + 0.00025910756, + 0.041818283, + -0.010330904, + 0.021645231, + -0.04928375, + 0.025375145, + -0.05574737, + 0.031576894, + -0.0131033845, + -0.04442265, + -0.06874675, + -0.048191894, + -0.027934281, + 0.07388608, + 0.003174666, + 0.0461046, + -0.035721015, + -0.024965782, + -0.013885509, + 0.08637276, + 0.0209963, + -0.0411877, + -0.017168613, + -0.029813036, + -0.05661447, + 0.08469515, + -0.027904486, + 0.007161427, + -0.026347049, + 0.0725012, + 0.06476124, + -0.012442011, + 0.00563372, + 0.0109798275, + 0.014453135, + 0.011751716, + -0.015325462, + 0.03465245, + -0.034183756, + -0.028540483 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/c2ac76cbf66d.json b/tests/integration/recordings/responses/c2ac76cbf66d.json index 34f0c4a1d..496f41815 100644 --- a/tests/integration/recordings/responses/c2ac76cbf66d.json +++ b/tests/integration/recordings/responses/c2ac76cbf66d.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-963", + "id": "chatcmpl-876", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245073, + "created": 1759282400, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/c31a86ea6c58.json b/tests/integration/recordings/responses/c31a86ea6c58.json deleted file mode 100644 index b8d109ddd..000000000 --- a/tests/integration/recordings/responses/c31a86ea6c58.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 0<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b", - "created_at": "2025-08-11T15:56:06.703788Z", - "done": true, - "done_reason": "stop", - "total_duration": 2722294000, - "load_duration": 9736083, - "prompt_eval_count": 21, - "prompt_eval_duration": 113000000, - "eval_count": 324, - "eval_duration": 2598000000, - "response": "Here are some test metrics that can be used to evaluate the performance of a system:\n\n1. 
**Accuracy**: The proportion of correct predictions made by the model.\n2. **Precision**: The ratio of true positives (correctly predicted instances) to total positive predictions.\n3. **Recall**: The ratio of true positives to the sum of true positives and false negatives (missed instances).\n4. **F1-score**: The harmonic mean of precision and recall, providing a balanced measure of both.\n5. **Mean Squared Error (MSE)**: The average squared difference between predicted and actual values.\n6. **Mean Absolute Error (MAE)**: The average absolute difference between predicted and actual values.\n7. **Root Mean Squared Percentage Error (RMSPE)**: A variation of MSE that expresses the error as a percentage.\n8. **Coefficient of Determination (R-squared, R2)**: Measures how well the model explains the variance in the data.\n9. **Mean Absolute Percentage Error (MAPE)**: The average absolute percentage difference between predicted and actual values.\n10. **Mean Squared Logarithmic Error (MSLE)**: A variation of MSE that is more suitable for skewed distributions.\n\nThese metrics can be used to evaluate different aspects of a system's performance, such as:\n\n* Classification models: accuracy, precision, recall, F1-score\n* Regression models: MSE, MAE, RMSPE, R2, MSLE\n* Time series forecasting: MAPE, RMSPE\n\nNote that the choice of metric depends on the specific problem and data.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/c7582fa7c2c4.json b/tests/integration/recordings/responses/c7582fa7c2c4.json deleted file mode 100644 index d1edd7336..000000000 --- a/tests/integration/recordings/responses/c7582fa7c2c4.json +++ /dev/null @@ -1,347 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. 
besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\nReturns the boiling point of a liquid in Celsius or Fahrenheit.\n\n:param liquid_name: The name of the liquid\n:param celsius: Whether to return the boiling point in Celsius\n:return: The boiling point of the liquid in Celcius or Fahrenheit\n\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHow can I assist you further?<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the boiling point of polyjuice? Use tools to answer.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.64197Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.687885Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "get", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.73112Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_bo", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.774191Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "iling", - "thinking": null, - 
"context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.816695Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_point", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.859121Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.901585Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "liquid", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.943788Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_name", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:18.986429Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "='", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.029894Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "poly", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.073113Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "ju", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.116671Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "ice", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-07-29T23:26:19.159456Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "',", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.203354Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " c", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.246192Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "elsius", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.290499Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=True", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.334562Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-07-29T23:26:19.380415Z", - "done": true, - "done_reason": "stop", - "total_duration": 881889250, - "load_duration": 69966916, - "prompt_eval_count": 503, - "prompt_eval_duration": 70368167, - "eval_count": 18, - "eval_duration": 740885458, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/c8234a1171f3.json b/tests/integration/recordings/responses/c8234a1171f3.json index 6bfe929b4..241e998e1 100644 --- a/tests/integration/recordings/responses/c8234a1171f3.json +++ b/tests/integration/recordings/responses/c8234a1171f3.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-240", + "id": "chatcmpl-306", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245081, + "created": 1759282478, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/c9cba6f3ee38.json b/tests/integration/recordings/responses/c9cba6f3ee38.json index 02363c70e..1ba23221e 100644 --- a/tests/integration/recordings/responses/c9cba6f3ee38.json +++ b/tests/integration/recordings/responses/c9cba6f3ee38.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:38:03.002753Z", + "created_at": 
"2025-09-30T17:40:02.587880074Z", "done": true, "done_reason": "stop", - "total_duration": 334941166, - "load_duration": 149512166, + "total_duration": 2895949169, + "load_duration": 45631237, "prompt_eval_count": 219, - "prompt_eval_duration": 173843500, + "prompt_eval_duration": 2801365130, "eval_count": 2, - "eval_duration": 11119166, + "eval_duration": 48315364, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/cbd6b65e0622.json b/tests/integration/recordings/responses/cbd6b65e0622.json new file mode 100644 index 000000000..9a77e7349 --- /dev/null +++ b/tests/integration/recordings/responses/cbd6b65e0622.json @@ -0,0 +1,98 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/chat/completions", + "headers": {}, + "body": { + "model": "llama3.2:3b-instruct-fp16", + "messages": [ + { + "role": "user", + "content": "what's the current time? You MUST call the `get_current_time` function to find out." + } + ], + "stream": true, + "tools": [ + { + "type": "function", + "function": { + "type": "function", + "name": "get_current_time", + "description": "Get the current time", + "parameters": {}, + "strict": null + } + } + ] + }, + "endpoint": "/v1/chat/completions", + "model": "llama3.2:3b-instruct-fp16" + }, + "response": { + "body": [ + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-979", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": [ + { + "index": 0, + "id": "call_ik598ri6", + "function": { + "arguments": "{}", + "name": "get_current_time" + }, + "type": "function" + } + ] + }, + "finish_reason": null, + "index": 0, + "logprobs": null + } + ], + "created": 1759282380, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + }, + { + "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", + "__data__": { + "id": "chatcmpl-979", + "choices": [ + { + "delta": { + "content": "", + "function_call": null, + "refusal": null, + "role": "assistant", + "tool_calls": null + }, + "finish_reason": "tool_calls", + "index": 0, + "logprobs": null + } + ], + "created": 1759282380, + "model": "llama3.2:3b-instruct-fp16", + "object": "chat.completion.chunk", + "service_tier": null, + "system_fingerprint": "fp_ollama", + "usage": null + } + } + ], + "is_streaming": true + } +} diff --git a/tests/integration/recordings/responses/cd094caaf1c0.json b/tests/integration/recordings/responses/cd094caaf1c0.json deleted file mode 100644 index 70a3d334d..000000000 --- a/tests/integration/recordings/responses/cd094caaf1c0.json +++ /dev/null @@ -1,7115 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - 
"created_at": "2025-09-03T17:36:21.138019Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.179853Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'d", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.220635Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " be", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.261418Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " happy", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.301991Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " to", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.3425Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " give", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.38302Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.423862Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " an", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.464611Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - 
"prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " update", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.505714Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " on", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.547075Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.588896Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " current", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.629146Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.669722Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " conditions", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.710707Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.751267Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.791565Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - 
{ - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.83176Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.872029Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " CA", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.914066Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.955317Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "As", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:21.995588Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.03605Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " now", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.076924Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.117922Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "just", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.158925Z", - "done": false, - 
"done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " kidding", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.199113Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.239797Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.280592Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " don", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.321607Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'t", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.36237Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " have", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.402735Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " real", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.44328Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "-time", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:22.48369Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": 
null, - "response": " access", - "thinking": null, - "context": null - } - },
-      [roughly 180 streamed "ollama._types.GenerateResponse" chunks elided: model "llama3.2:3b-instruct-fp16", created_at 2025-09-03T17:36:22Z through 17:36:29Z, one token per chunk with "done": false and all duration/count fields null; their concatenated "response" tokens read: " to current weather data), let me provide you with a general overview of what the typical weather is like in San Francisco during different times of the year:\n\n**Current Weather Conditions:**\n\nSince I'm not connected to real-time weather data, I'll give you an example of what the weather might be like on a typical day in San Francisco. Keep in mind that this is just a hypothetical scenario.\n\n**Seasonal Breakdown:**\n\n* **Summer (June to August):** Warm and sunny with average highs around 73°F (23°C) and lows around 58°F (14°C). Expect foggy mornings, but clear skies during the day.\n* **Fall (September to November):** Mild and pleasant with average highs around 68°F (20°C) and lows around 52°F (11°C).\n* **Winter (December to February):** Cool and"]
- { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.706601Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, -
"prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " wet", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.747221Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " with", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.787753Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " average", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.828297Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " highs", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.86906Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " around", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.909608Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.950119Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "58", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:29.990856Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.031737Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.072804Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "14", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.115879Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.157268Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ")", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.198026Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.238729Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " lows", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.279348Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " around", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.31988Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.360471Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "45", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.401158Z", - "done": false, - "done_reason": 
null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.441986Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.482303Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "7", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.523844Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.564853Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ").", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.605812Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Expect", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.646752Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " fog", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.68766Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "gy", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.728603Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - 
"response": " mornings", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.769336Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.80994Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " but", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.850918Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " some", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.89149Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " sunny", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.932133Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " days", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:30.97327Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " during", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.016238Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.057488Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " day", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": 
"llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.097989Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.13892Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "*", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.179559Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " **", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.220282Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Spring", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.260847Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.301689Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "March", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.342413Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " to", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.383094Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " May", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.424087Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - 
"prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "):", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.465298Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "**", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.506962Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Mild", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.548213Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.589913Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " pleasant", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.630948Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " with", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.672087Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " average", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.713337Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " highs", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.754423Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " around", - "thinking": null, - 
"context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.795742Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.836637Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "62", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.878115Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.919569Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:31.960615Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "17", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.001695Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.042291Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ")", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.082564Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:36:32.123962Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " lows", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.164847Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " around", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.205607Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.246372Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "50", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.287091Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.32769Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.368571Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "10", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.409389Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.450109Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - 
"prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ").\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.491077Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "**", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.532737Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Current", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.572701Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.614093Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Conditions", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.655113Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ":", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.696438Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "**\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.73788Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Let", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.780775Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - 
"__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.823196Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " say", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.86428Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " it", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.905305Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.946086Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:32.986849Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " typical", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.028251Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " San", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.069225Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " Francisco", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.110717Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " morning", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.151703Z", - "done": 
false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.192643Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " late", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.233604Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " spring", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.274665Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.315311Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " The", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.356272Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " temperature", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.397164Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.438163Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " around", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.478995Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": 
null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.520178Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "58", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.561169Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0F", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.602614Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.643517Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "14", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.69501Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "\u00b0C", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.744642Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "),", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.788023Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " with", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.830123Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - 
"model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.873234Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " gentle", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.91574Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " breeze", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:33.958165Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " blowing", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.000544Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " at", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.043824Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " about", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.086339Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " ", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.128863Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "5", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.171675Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " mph", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.214025Z", - "done": false, - "done_reason": null, - "total_duration": null, - 
"load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " (", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.256135Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "8", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.298571Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " km", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.340742Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "/h", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.38192Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ").", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.423807Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " There", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.465059Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "'s", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.506527Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.547797Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " slight", - "thinking": null, - 
"context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.589189Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " chance", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.632479Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.673914Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " light", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.714561Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " dr", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.755794Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "izzle", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.797365Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.839305Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " but", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.881479Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": 
"2025-09-03T17:36:34.923518Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " sun", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:34.964593Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " will", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.005594Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " break", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.047897Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " through", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.088945Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.130496Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " clouds", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.171697Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " later", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.212785Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " in", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.254Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - 
"prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.294945Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " day", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.335904Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".\n\n", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.376911Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Please", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.417931Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " note", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.45891Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " that", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.501211Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " this", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.543696Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " is", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.584233Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " just", - "thinking": null, - "context": null - } - }, - { - "__type__": 
"ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.626596Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " an", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.667752Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " example", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.70907Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.749741Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " actual", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.79089Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.832516Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " conditions", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.874088Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " may", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.915661Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " vary", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.95745Z", - "done": false, - 
"done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " depending", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:35.998856Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " on", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.040666Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.082075Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " time", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.123665Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " of", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.164998Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " year", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.206212Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " and", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.24761Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " other", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.288872Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - 
"eval_duration": null, - "response": " factors", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.330688Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.372212Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " If", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.415315Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " you", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.458461Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " need", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.501868Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " more", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.544291Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " up", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.58593Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "-to", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.627055Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "-date", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": 
"llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.668404Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " information", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.709546Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ",", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.750533Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " I", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.792039Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " recommend", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.833512Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " checking", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.875114Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " a", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.916425Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " reliable", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:36.959229Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " weather", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.000732Z", - "done": false, - "done_reason": null, - "total_duration": null, - 
"load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " website", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.042352Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " or", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.083572Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " app", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.125478Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " for", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.166749Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " the", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.207713Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " latest", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.249261Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": " forecast", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.291638Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": ".", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:37.333479Z", - "done": true, - "done_reason": "stop", - "total_duration": 16422193500, - "load_duration": 146702667, - "prompt_eval_count": 36, - "prompt_eval_duration": 78361500, - "eval_count": 394, - "eval_duration": 16196482750, - 
"response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/cd294c2e0038.json b/tests/integration/recordings/responses/cd294c2e0038.json index cad7814b3..985cfa1bb 100644 --- a/tests/integration/recordings/responses/cd294c2e0038.json +++ b/tests/integration/recordings/responses/cd294c2e0038.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-325", + "id": "chatcmpl-251", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759247860, + "created": 1759282591, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/cf776b1aa432.json b/tests/integration/recordings/responses/cf776b1aa432.json index c7449427a..3b08967d5 100644 --- a/tests/integration/recordings/responses/cf776b1aa432.json +++ b/tests/integration/recordings/responses/cf776b1aa432.json @@ -21,7 +21,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -36,7 +36,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282661, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -47,7 +47,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -62,7 +62,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282661, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -73,7 +73,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -88,7 +88,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282661, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -99,7 +99,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -114,7 +114,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282661, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -125,7 +125,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -140,7 +140,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282661, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -151,7 +151,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -166,7 +166,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282662, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -177,7 +177,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -192,7 +192,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282662, "model": "llama3.2:3b-instruct-fp16", 
"object": "chat.completion.chunk", "service_tier": null, @@ -203,7 +203,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-78", + "id": "chatcmpl-615", "choices": [ { "delta": { @@ -218,7 +218,7 @@ "logprobs": null } ], - "created": 1759259077, + "created": 1759282662, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/d0ac68cbde69.json b/tests/integration/recordings/responses/d0ac68cbde69.json index 4dcc6a69b..b37962fb6 100644 --- a/tests/integration/recordings/responses/d0ac68cbde69.json +++ b/tests/integration/recordings/responses/d0ac68cbde69.json @@ -11,27 +11,7 @@ "body": { "__type__": "ollama._types.ProcessResponse", "__data__": { - "models": [ - { - "model": "llama3.2:3b-instruct-fp16", - "name": "llama3.2:3b-instruct-fp16", - "digest": "195a8c01d91ec3cb1e0aad4624a51f2602c51fa7d96110f8ab5a20c84081804d", - "expires_at": "2025-09-30T14:29:52.682809-07:00", - "size": 8581748736, - "size_vram": 8581748736, - "details": { - "parent_model": "", - "format": "gguf", - "family": "llama", - "families": [ - "llama" - ], - "parameter_size": "3.2B", - "quantization_level": "F16" - }, - "context_length": null - } - ] + "models": [] } }, "is_streaming": false diff --git a/tests/integration/recordings/responses/d7caf68e394e.json b/tests/integration/recordings/responses/d7caf68e394e.json index acabcaa04..2347344c1 100644 --- a/tests/integration/recordings/responses/d7caf68e394e.json +++ b/tests/integration/recordings/responses/d7caf68e394e.json @@ -21,7 +21,7 @@ "body": { "__type__": "openai.types.chat.chat_completion.ChatCompletion", "__data__": { - "id": "chatcmpl-56", + "id": "chatcmpl-480", "choices": [ { "finish_reason": "stop", @@ -38,7 +38,7 @@ } } ], - "created": 1759245088, + "created": 1759282535, "model": "llama-guard3:1b", "object": "chat.completion", "service_tier": null, diff --git a/tests/integration/recordings/responses/dac7a32e5db9.json b/tests/integration/recordings/responses/dac7a32e5db9.json deleted file mode 100644 index 97d1fccfc..000000000 --- a/tests/integration/recordings/responses/dac7a32e5db9.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the capital of France?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:39:36.919474Z", - "done": true, - "done_reason": "stop", - "total_duration": 470635833, - "load_duration": 113755958, - "prompt_eval_count": 23, - "prompt_eval_duration": 67480542, - "eval_count": 8, - "eval_duration": 288746541, - "response": "The capital of France is Paris.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/dd226d71f844.json b/tests/integration/recordings/responses/dd226d71f844.json index ba2810bc9..aa4d64da7 100644 --- a/tests/integration/recordings/responses/dd226d71f844.json +++ 
b/tests/integration/recordings/responses/dd226d71f844.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.682744Z", + "created_at": "2025-10-01T01:36:39.731839864Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.72605Z", + "created_at": "2025-10-01T01:36:39.927398349Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.770654Z", + "created_at": "2025-10-01T01:36:40.131176362Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.819087Z", + "created_at": "2025-10-01T01:36:40.3289863Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.862915Z", + "created_at": "2025-10-01T01:36:40.527460869Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.913209Z", + "created_at": "2025-10-01T01:36:40.722852039Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.951646Z", + "created_at": "2025-10-01T01:36:40.922357134Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.996738Z", + "created_at": "2025-10-01T01:36:41.142449109Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:06.046726Z", + "created_at": "2025-10-01T01:36:41.34351538Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:06.08508Z", + "created_at": "2025-10-01T01:36:41.544611985Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:06.128566Z", + "created_at": "2025-10-01T01:36:41.746118193Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:06.173309Z", + "created_at": "2025-10-01T01:36:41.949240209Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,15 +238,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:06.218818Z", + "created_at": 
"2025-10-01T01:36:42.151060868Z", "done": true, "done_reason": "stop", - "total_duration": 755252250, - "load_duration": 141479625, + "total_duration": 4482970180, + "load_duration": 43494552, "prompt_eval_count": 402, - "prompt_eval_duration": 76304166, + "prompt_eval_duration": 2018500580, "eval_count": 13, - "eval_duration": 536202125, + "eval_duration": 2420393884, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/dd9e7d5913e9.json b/tests/integration/recordings/responses/dd9e7d5913e9.json deleted file mode 100644 index e3d8b41f5..000000000 --- a/tests/integration/recordings/responses/dd9e7d5913e9.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_object_namespace_list\",\n \"description\": \"Get the list of objects in a namespace\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"kind\", \"namespace\"],\n \"properties\": {\n \"kind\": {\n \"type\": \"string\",\n \"description\": \"the type of object\"\n },\n \"namespace\": {\n \"type\": \"string\",\n \"description\": \"the name of the namespace\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat pods are in the namespace openshift-lightspeed?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[get_object_namespace_list(kind=\"pod\", namespace=\"openshift-lightspeed\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nthe objects are pod1, pod2, pod3<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:40.972565Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:36:41.014682Z", - "done": true, - "done_reason": "stop", - "total_duration": 693115125, - 
"load_duration": 114019375, - "prompt_eval_count": 386, - "prompt_eval_duration": 535931209, - "eval_count": 2, - "eval_duration": 42505166, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/decfd950646c.json b/tests/integration/recordings/responses/decfd950646c.json index c46fa8686..1c2934ab5 100644 --- a/tests/integration/recordings/responses/decfd950646c.json +++ b/tests/integration/recordings/responses/decfd950646c.json @@ -44,22 +44,32 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-202", + "id": "chatcmpl-163", "choices": [ { "delta": { - "content": "{\"name\":\"get_weather\",\"parameters{\"key\"]=\"Tokyo\"}}", + "content": "", "function_call": null, "refusal": null, "role": "assistant", - "tool_calls": null + "tool_calls": [ + { + "index": 0, + "id": "call_5gqadim6", + "function": { + "arguments": "{\"city\":\"Tokyo\"}", + "name": "get_weather" + }, + "type": "function" + } + ] }, "finish_reason": null, "index": 0, "logprobs": null } ], - "created": 1756921363, + "created": 1759254129, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, @@ -70,7 +80,7 @@ { "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__data__": { - "id": "chatcmpl-202", + "id": "chatcmpl-163", "choices": [ { "delta": { @@ -80,12 +90,12 @@ "role": "assistant", "tool_calls": null }, - "finish_reason": "stop", + "finish_reason": "tool_calls", "index": 0, "logprobs": null } ], - "created": 1756921363, + "created": 1759254129, "model": "llama3.2:3b-instruct-fp16", "object": "chat.completion.chunk", "service_tier": null, diff --git a/tests/integration/recordings/responses/e19cd96d3d9f.json b/tests/integration/recordings/responses/e19cd96d3d9f.json new file mode 100644 index 000000000..e68a3fef7 --- /dev/null +++ b/tests/integration/recordings/responses/e19cd96d3d9f.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "This is a test file" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.053758882, + 0.038832866, + -0.14896753, + -0.05763937, + 0.046078444, + -0.03673306, + 0.03443965, + 0.0035839507, + -0.046247713, + -0.057672556, + -0.0029053201, + 0.03271797, + 0.008142858, + -0.0054671364, + -0.05689011, + -0.04021888, + 0.06676909, + -0.07054023, + 0.008608768, + -0.03578119, + 0.021355929, + -0.034052633, + -0.08896779, + 0.0051109465, + 0.12570412, + 0.02139755, + -0.046905495, + 0.02842989, + -0.06747682, + -0.0058463546, + 0.0481647, + -0.01887986, + 0.020494882, + -0.023393275, + -0.021654177, + -0.057471123, + 0.026497748, + 0.03751032, + 0.038979724, + 0.029206974, + -0.02912504, + -0.0066743814, + -0.018511254, + -0.0048742057, + 0.032597076, + 0.019944616, + -0.00939136, + 0.05675954, + -0.021450477, + -0.0011022915, + -0.00854399, + 0.0071911, + -0.0158938, + 0.016827852, + 0.050103787, + -0.026179831, + 0.014221046, + -0.0003115159, + -0.019583391, + -0.07569287, + 0.036399294, + 0.03607082, + -0.07833437, + 0.054612152, + 0.0069902637, + -0.07138526, + -0.04489236, + -0.0015609767, + -0.005164461, 
+ 0.02771437, + 0.09080423, + 0.019013625, + 0.016519958, + -0.019777367, + 0.0024592814, + -0.04387287, + -0.005836657, + -0.063302755, + -0.071804225, + -0.015422637, + 0.0700607, + 0.01462268, + -0.0075372704, + 0.059862956, + 0.081774905, + -0.040090047, + -0.044520658, + -0.014827226, + 0.008794842, + 0.02768928, + 0.040841054, + 0.03498003, + 0.044498052, + -0.02172259, + -0.026720297, + 0.008463096, + 0.014429588, + 0.06089317, + -0.009845722, + 0.0063866396, + 0.010393747, + 0.020182539, + 0.03181014, + -0.023324894, + 0.028979924, + 0.018914852, + -0.019926151, + 0.0128603885, + -0.04318784, + -0.015088658, + 0.0056466036, + 0.041816916, + -0.037344925, + -0.004126689, + 0.011575758, + -0.01598143, + 0.020690521, + -0.04184528, + -0.042596396, + 0.024362125, + 0.017174868, + -0.0012244079, + 0.007195055, + 0.04446234, + 0.01828835, + 0.04812283, + -0.03951256, + 0.042883415, + 0.017657666, + -0.04830957, + -0.0015999862, + 0.0142018, + -0.016914146, + -0.023650466, + 0.02889179, + 0.045774486, + 0.0025694002, + -0.008831675, + -0.059108555, + -0.009949093, + -0.03725936, + -0.01088702, + 0.029935138, + 0.042665828, + 0.034854196, + -0.012590703, + 0.024468226, + 0.025324184, + -0.004415537, + 0.0036964733, + 0.037010476, + 0.010400129, + 0.014211147, + 0.016792757, + 0.019303495, + -0.05781278, + -0.005105199, + -0.015839323, + 0.033342622, + 0.07257149, + 0.00089130324, + -0.0337523, + -0.016002623, + 0.01755833, + -0.06125777, + -0.046952333, + 0.0041778465, + 0.104189105, + 0.065975755, + -0.02490904, + -0.030258112, + -0.042782586, + 0.002475365, + -0.004088971, + -0.060251836, + -0.029733855, + 0.010537102, + -0.036400363, + 0.050550237, + -0.009534188, + 0.048663102, + -0.012078062, + 0.011420914, + 0.01801528, + 0.0053786607, + -0.040858243, + 0.0062899343, + -0.035764158, + -0.028465275, + 0.003017353, + -0.007869094, + -0.030625286, + -0.09092833, + -0.04718793, + 0.011549368, + -0.028128764, + 0.00030076268, + -0.0177743, + 0.01952984, + -0.0073801214, + 0.005680257, + -0.007859802, + -0.06409156, + 0.034170788, + -0.026292793, + 0.0049399645, + -0.04899549, + -0.032840755, + -0.03316707, + 0.0127454, + 0.07625459, + -0.006468158, + -0.018757073, + 0.039154533, + 0.035096716, + -0.016726742, + -0.0060864873, + -0.029742138, + -0.029156253, + -0.01496455, + 0.024316646, + -0.031520814, + 0.023276668, + -0.032704417, + 0.006193504, + -0.037157167, + -0.06893218, + -0.026257787, + -0.01227152, + -0.031095559, + -0.0048738606, + -0.080599256, + 0.022100152, + 0.017628722, + -0.018785588, + -0.017143749, + -0.04749942, + 0.06745294, + -0.016267797, + 0.0373475, + -0.023250228, + 0.042334173, + -0.020025365, + -0.007763279, + -0.023800656, + 0.015743172, + 0.005240379, + -0.056436196, + 0.059064813, + 0.03735957, + -0.013201106, + 0.043321673, + 0.028031837, + 0.07712444, + 0.020895857, + 0.0033679043, + -0.021562262, + -0.037665877, + 0.016047759, + -0.038291715, + 0.012231696, + -0.04138876, + 0.023888383, + -0.004567559, + -0.035839446, + 0.006351312, + -0.028676957, + 0.041284245, + -0.03021304, + -0.024045503, + -0.01343801, + 0.033740558, + 0.030106168, + -0.02504732, + 0.029200288, + -0.019623024, + 0.013830142, + 0.027436886, + 0.0049833255, + 0.030972818, + -0.020466058, + 0.000773597, + 0.010922725, + 0.0283304, + 0.016188335, + 0.02424716, + 0.03911355, + 0.01550475, + 0.042709596, + 0.036275722, + -0.00046863785, + 0.03285776, + -0.013077435, + 0.021609226, + 0.0008685554, + 0.01708775, + 0.068446875, + -0.017360637, + -0.003488762, + 0.011598318, + -0.0058523375, 
+ 0.013691473, + 0.045294084, + 0.018984735, + 0.0275332, + -0.037544344, + 0.036346726, + -0.033725083, + 0.022936849, + 0.0215334, + -0.075951464, + -0.009648661, + -0.036136348, + 0.021613814, + -0.02455763, + 0.04924421, + 0.016531106, + 0.02405064, + 0.07053475, + -0.036349453, + 0.0016287306, + -0.06446291, + -0.028437959, + 0.010191873, + 0.012296818, + 0.012329564, + 0.013915074, + 0.048434693, + -0.03590033, + -0.0525744, + 0.05558266, + 0.07321991, + -0.054426316, + -0.030174559, + 0.02285781, + 0.039927386, + 0.035223886, + 0.049555033, + 0.007374941, + 0.044193067, + 0.06786747, + 0.00036152382, + 0.027464418, + 0.016859235, + 0.01616493, + -0.038499907, + -0.02291476, + 0.024937056, + 0.0041996776, + 0.0698748, + 0.0015127198, + 0.013325001, + 0.030350806, + -0.023846446, + 0.025110258, + 0.0054002786, + 0.019181678, + -0.031506006, + 0.05752808, + -0.010405221, + 0.023109913, + -0.023511393, + -0.0049008867, + -0.021419058, + 0.013513006, + 0.030098746, + -0.018317498, + 0.026702078, + 0.075319916, + 0.008198215, + -0.01715998, + -0.013291193, + 0.044264887, + 0.07020028, + 0.061081603, + 0.0417841, + -0.06894315, + -0.03422526, + 0.0012161441, + 0.034968503, + 0.058317643, + -0.025475413, + 0.027475594, + 0.049771804, + 0.035385806, + -0.035563156, + 0.023909466, + -0.005192664, + 0.05775682, + 0.02994165, + -0.030322695, + 0.021936368, + -0.07662721, + 0.004190903, + -0.009891469, + -0.016764412, + 0.022064973, + 0.012029886, + -0.046792373, + 0.0044136844, + -0.00946375, + -0.026822358, + -0.00050651265, + 0.01757855, + -0.022725847, + 0.00879324, + -0.043154534, + -0.061548065, + 0.029624073, + -0.024554785, + 0.05105945, + -0.05148312, + -0.03555139, + -0.052438557, + -0.010544604, + 0.020527197, + 0.030215781, + 0.018875282, + -0.01664549, + -0.005204754, + 0.009743897, + 0.023518153, + 0.02128166, + -0.022251425, + -0.04094683, + 0.0139064565, + 0.03803237, + 0.06790909, + -0.001843859, + -0.08696959, + -0.00012469757, + -0.0008513802, + -0.005044505, + -0.0075445618, + -0.015664855, + 0.0692631, + -0.020855572, + -0.03539066, + -0.016617907, + 0.051752944, + 0.034464356, + -0.073461555, + -0.015417356, + -0.007742076, + -0.017683357, + 0.12933765, + 0.09461965, + -0.044114266, + -0.053821612, + -0.008163221, + -0.008447408, + 0.0076388875, + -0.015357782, + 0.034570407, + 0.07185514, + -0.028936882, + 0.0531398, + -0.030973969, + -0.0032165123, + 0.045826234, + -0.012802924, + 0.018516479, + 0.05869127, + 0.041928004, + 0.030072877, + 0.0042537972, + 0.018244978, + -0.04296889, + 0.015562498, + 0.042186752, + -0.0015617026, + -0.063013196, + 0.024385404, + -0.032713488, + 0.010211183, + -0.0069401376, + -0.02364344, + 0.02480353, + -0.02844019, + 0.016215922, + 0.0252478, + -0.0037265052, + -0.030359179, + -0.025395883, + 0.015926762, + 0.020716459, + 0.025846127, + 0.018661655, + 0.0241015, + -0.0039253472, + 0.053291462, + 0.0075271, + 0.04915547, + 0.030260459, + 0.00963137, + -0.038408153, + -0.0284138, + -0.039237533, + -0.005525457, + 0.014672727, + 0.029539606, + -0.008607205, + 0.0152245145, + -0.030883666, + -0.016499644, + -0.0109075885, + 0.007604617, + -0.032032408, + -0.09308442, + -0.01050685, + -0.03883002, + -0.018666804, + 0.02166306, + 0.041098118, + 0.04546551, + -0.014216274, + 0.011799548, + 0.0071188095, + -0.025481777, + 0.018403957, + 0.02617805, + 0.0055660508, + 0.008809895, + -0.020674, + -0.098965384, + 0.03985033, + 0.022548705, + -0.01459568, + 0.07178989, + 0.061437577, + 0.009772697, + -0.0059043677, + 0.004458944, + -0.0090488745, + 
-0.033203818, + -0.015282819, + -0.044177573, + 0.011769875, + -0.0011643603, + 0.061295986, + -0.04839425, + -0.031219115, + 0.0024838632, + -0.032175247, + 0.007275243, + -0.027875084, + -0.06356691, + 0.01175946, + 0.0006294221, + -0.05412901, + 0.01858117, + -0.033687256, + -0.05291359, + -0.0069765327, + 0.040133674, + -0.04281862, + -0.0018926514, + -0.028072793, + -0.036874, + -0.047816034, + 0.05245003, + 0.0010536157, + -0.01319925, + 0.017749405, + 0.033703025, + -0.024302596, + -0.002920313, + 0.011033847, + -0.013011603, + -0.0105831595, + 0.013745272, + -0.0046018655, + -0.008408154, + -0.0147772925, + -0.03542984, + 0.017276762, + 0.038967792, + 0.06198965, + -0.032134645, + -0.022995302, + 0.06386363, + -0.028955221, + 0.021770647, + 0.037283987, + -0.0063682087, + -0.0019520292, + 0.0082411785, + -0.0080857165, + 0.03140237, + -0.039429568, + -0.042378973, + -0.020186571, + -0.0033806555, + 0.011414012, + 0.010418005, + 0.011475544, + -0.009851655, + -0.043615747, + 0.008853348, + -0.025179809, + -0.004863447, + 0.036882065, + -0.0019433503, + -0.048919167, + -0.04550448, + -0.004460618, + 0.03360312, + 0.027988102, + -0.016884074, + -0.024569506, + 0.048515636, + -0.013583301, + -0.07463627, + 0.01852176, + -0.012442827, + -0.061967682, + 0.059691124, + -0.050810352, + -0.018428395, + -0.022910368, + 0.011185239, + -0.028457617, + 0.06059784, + -0.016440384, + -0.0031041217, + -0.024506314, + -0.05280125, + 0.032860003, + 0.041123923, + 0.054165002, + -0.06297606, + 0.04966855, + -0.062108725, + -0.0644873, + -0.06372453, + 0.011317424, + -0.06354954, + 0.016408185, + 0.077334605, + 0.080707446, + 0.035989966, + 0.020155272, + -0.03928742, + -0.025508054, + -0.003647622, + 0.032227226, + -0.00080238096, + 0.025645627, + 0.029319866, + -0.063444436, + 0.06238845, + 0.0857085, + 0.03239185, + -0.011074311, + -0.0030367048, + 0.02812013, + 0.0406857, + -0.035966817, + -0.058475945, + -0.08341111, + -0.01660168, + 0.020067537, + -0.03546514, + -0.010423842, + 0.032722004, + 0.031745553, + -0.021651376, + -0.02822335, + -0.004464206, + -0.06761355, + 0.021431813, + 0.01613369, + 0.05481661, + 0.023063073, + -0.019324815, + 0.024383735, + 0.04141192, + 0.07242811, + -0.01618665, + -0.028350264, + -0.029206932, + -0.027982049, + 0.046629075, + 0.020287214, + 0.036934398, + -0.08857218, + 0.0026579907, + -0.05456532, + -0.031724136, + 0.0018138097, + -0.020164374, + 0.03203404, + -0.020969884, + -0.051650107, + -0.017484171, + 0.012802554, + 0.057993267, + -0.02748192, + 0.011279883, + 0.042745125, + 0.012816452, + 0.046430167, + 0.0040667434, + 0.04381184, + -0.02901727, + -0.0037176237, + 0.005408482, + 0.015330155, + -0.068073936, + -0.053268924, + 0.031550363, + -0.004767886, + -0.006504093, + 0.06489545, + -0.013510619, + 0.032298867, + -0.011263598, + -0.0030225017, + -0.011116073, + -0.03667866, + 0.06385139, + 0.025419476, + -0.042022824, + -0.0067015574, + -0.00083755056, + -0.033694033, + -0.002498642, + -0.028272718, + 0.061338726, + -0.06347687, + -0.025900617, + -0.03831271, + -0.020736072, + 0.011711141, + -0.023294803, + -0.02037071, + -0.008424271, + -0.014250913, + 0.005901058, + 0.025783215, + 0.014446211, + 0.029651158, + -0.039294545, + -0.017202891, + -0.026003383, + 0.013907814, + -0.02433525, + -0.00025631147, + -0.016748777, + 0.01577136, + 0.03785109, + -0.04441154, + 0.00446964, + 0.015128182, + -0.024619348, + -0.02516635, + -0.011604469, + -0.002341862, + 0.07883857, + -0.022424331, + -0.003427902, + -0.027802102, + 0.03210735, + 0.015019108, + 
-0.003994307, + -0.0668317, + 0.010897627, + -0.03735794 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 5, + "total_tokens": 5 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/ed4d1f04922a.json b/tests/integration/recordings/responses/ed4d1f04922a.json new file mode 100644 index 000000000..221a3e8bd --- /dev/null +++ b/tests/integration/recordings/responses/ed4d1f04922a.json @@ -0,0 +1,806 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/embeddings", + "headers": {}, + "body": { + "model": "nomic-embed-text:137m-v1.5-fp16", + "input": [ + "test query" + ], + "encoding_format": "float" + }, + "endpoint": "/v1/embeddings", + "model": "nomic-embed-text:137m-v1.5-fp16" + }, + "response": { + "body": { + "__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse", + "__data__": { + "data": [ + { + "embedding": [ + 0.021632178, + 0.027914394, + -0.1697706, + -0.005746459, + 0.081694774, + -0.036242362, + 0.044110596, + -0.010040523, + 0.05094842, + -0.034714997, + 0.00067446794, + 0.059252825, + 0.045464963, + -0.019745745, + -0.09469374, + -0.055485737, + 0.04956198, + -0.07061811, + 0.004430253, + -0.0013650421, + 0.0039823176, + -0.016534736, + -0.06654952, + 0.007747924, + 0.13796963, + -0.049733665, + -0.05554854, + 0.040059894, + -0.03410629, + -0.0174845, + 0.0012421905, + -0.008054571, + 0.05028361, + -0.06035659, + -0.03602028, + -0.007468131, + 0.019489577, + 0.05546567, + -0.01528942, + 0.016373884, + 0.0512837, + 0.005612254, + 0.019506592, + -0.043891408, + 0.05861537, + 0.004661528, + 0.02987339, + 0.04815755, + 0.041287735, + -0.06544313, + -0.060593937, + -0.044734612, + 0.04862789, + 0.00040237635, + 0.036487125, + 0.02125163, + -0.02205709, + 0.01653302, + 0.014464717, + -0.017106015, + 0.008528484, + 0.011147511, + -0.05461941, + 0.044410925, + 0.041690536, + -0.07552042, + -0.01458748, + 0.015171144, + -0.020879392, + 0.023344515, + 0.024334745, + 0.0007479308, + 0.03372315, + -0.02907623, + -0.026213601, + -0.04394315, + -0.041222204, + -0.033026088, + -0.016983762, + 0.019402906, + 0.050808404, + 0.008200248, + 0.032658946, + 0.02592705, + 0.065451615, + -0.009648091, + -0.026338676, + -0.045090627, + 0.008955429, + 0.054003514, + 0.070887536, + 0.011170758, + 0.05319236, + 0.02647423, + -0.023234531, + 0.0429655, + 0.010425875, + 0.008766717, + -0.007743366, + -0.022178784, + 0.014454298, + 0.008048641, + -0.014602866, + -0.02104439, + -0.0015444545, + 0.02550411, + 0.00640798, + 0.022998009, + -0.023848126, + 0.0153519465, + -0.08472956, + 0.088503994, + -0.05605452, + -0.0031228412, + -0.0146102775, + -0.011359548, + 0.036800005, + -0.002228197, + -0.019166265, + 0.009962921, + 0.011201131, + 0.06257485, + -0.04013102, + 0.07524311, + -0.06695553, + 0.046410732, + -0.06721607, + 0.070392214, + 0.020210113, + 0.030616906, + -0.010176257, + -0.04437035, + -0.04073405, + -0.005545895, + -0.014319286, + -0.0108559, + 0.015160815, + 0.0038574256, + -0.038591065, + -0.028480537, + -0.0037603336, + -0.0026127263, + -0.016551336, + 0.0067131557, + 0.01880424, + -0.02975355, + 0.049555935, + 0.032004688, + -0.02247247, + 0.01246225, + 0.0014132276, + -0.04564078, + 0.073596075, + -0.016278256, + 0.02661505, + -0.071765706, + -0.008734087, + 0.0059228106, + 0.019815922, + 0.03195911, + 0.034110207, + 0.002186661, + -0.027157558, + 0.022563938, + 0.004371381, + -0.095353276, + 
0.0126491375, + 0.07152678, + 0.052476395, + 0.01687662, + -0.055740036, + -0.08706196, + 0.014729762, + -0.02758909, + -0.03041602, + -0.013732155, + 0.02801321, + -0.03949483, + 0.05234382, + -0.022757512, + 0.044945277, + -0.03273144, + 0.051830135, + 0.04779128, + -0.0033031644, + -0.059135776, + 0.045916736, + -0.013965764, + -0.031585373, + -0.0348233, + -0.014461527, + -0.021362517, + -0.0933837, + -0.045136064, + -0.015860898, + -0.05576547, + 0.05323929, + 0.02853018, + 0.011573577, + -0.026535276, + -0.034710087, + 0.004239386, + -0.009515535, + 0.0073740263, + -0.03708428, + 0.005863241, + -0.0034215185, + -0.027957797, + 0.025702374, + 0.00027104435, + 0.053500094, + 0.013771332, + 0.0070968494, + 0.023770446, + 0.00059177354, + -0.018327447, + 0.018148914, + -0.05300124, + 0.011663108, + 0.0041946596, + 0.029597592, + -0.04498819, + -0.025770606, + -0.016552178, + 0.03649973, + -0.0026113144, + -0.029800741, + -0.0051037255, + -0.037785955, + -0.004011672, + 0.008388314, + -0.07386487, + 0.027827373, + -0.017644234, + 0.040156875, + 0.012558772, + -0.018537657, + 0.027227359, + 0.017754553, + -0.0023514442, + -0.00019146742, + 0.026330378, + 0.0048990417, + 0.001801477, + -0.021129632, + -0.019040564, + -0.00676009, + -0.01630914, + 0.03731455, + 0.03451654, + -0.011519037, + 0.034547996, + -0.013021845, + 0.06529378, + -0.0027941195, + -0.029327707, + -0.0015205761, + -0.00030807866, + 0.044125356, + -0.050125554, + -0.021474928, + -0.036387537, + 0.027332405, + -0.036275722, + -0.014284269, + -0.044650678, + -0.04752489, + -0.05118064, + -0.027629055, + -0.00840178, + 0.006526065, + 0.006029119, + 0.0515348, + 0.042522874, + 0.04250874, + -0.036549613, + 0.0040809833, + 0.007222438, + 0.0006154704, + -0.0011862804, + -0.049986668, + -0.012207448, + -0.012311223, + 0.0579436, + 0.017119106, + 0.044702828, + 0.018378116, + -0.042975478, + 0.011482488, + 0.03338398, + 0.029627593, + -0.003702722, + 0.013707621, + 0.0722397, + -0.04825861, + 0.002595163, + 0.05626591, + -0.05538993, + -0.014593107, + -0.030664815, + -0.0024281342, + 0.014381013, + 0.034984194, + 0.03836505, + -0.015559976, + -0.0178548, + 0.008508637, + -0.0420243, + 0.06886319, + 0.043678295, + -0.06081712, + -0.013053798, + -0.0144745, + 0.010727334, + -0.010015514, + 0.012619592, + 0.028617078, + 0.07104944, + 0.04651159, + -0.017558781, + -0.01964458, + -0.05832408, + -0.004396149, + -0.0094662085, + 2.9252704e-05, + 0.013188893, + 0.02073814, + 0.02572297, + -0.051345292, + -0.021314379, + 0.022341024, + 0.0504455, + -0.020129923, + -0.039247088, + 0.024191115, + 0.05492846, + -0.002607161, + 0.014393751, + -0.024947925, + 0.024203802, + 0.0459654, + -0.053469725, + 0.032838285, + -0.042045336, + -0.015527379, + 0.0037779824, + 0.011406948, + 0.025210217, + -0.004243978, + 0.04079417, + -0.07904523, + -0.017795421, + -0.030726308, + 0.004771128, + 0.04036818, + 0.009931332, + 0.049275525, + 0.0102964565, + 0.03184801, + 0.008870301, + 0.01113772, + -0.004711555, + 0.0020588748, + -0.02930364, + 0.022294488, + 0.04850413, + 0.004948362, + 0.033168487, + 0.03783192, + 0.008523242, + -0.038963992, + 0.010168049, + 0.0203781, + 0.0756254, + 0.028456664, + 0.024748417, + -0.11577714, + 0.0008548415, + -0.04344077, + 0.010738063, + 0.05030685, + 0.009963248, + 0.024150217, + -0.021010825, + 0.007167325, + -0.03658526, + 0.03546365, + -0.013390253, + -0.00047679353, + -0.012871292, + -0.017366923, + -0.02652982, + -0.10084066, + 0.045365952, + -0.011225272, + -0.04722176, + 0.015208917, + -0.005097921, + 
-0.053254534, + 0.047296874, + -0.006467315, + -0.028821256, + -0.011319134, + -0.017912796, + -0.027579976, + 0.0031363943, + -0.04184391, + -0.030255111, + 0.011568719, + -0.023129487, + 0.026739482, + -0.0010813978, + -0.03913729, + -0.070587024, + -0.012489462, + 0.014736244, + 0.05366716, + 0.012241483, + -0.049649883, + -0.023962388, + 0.02163842, + 0.032686006, + 0.03459904, + -0.026402587, + 0.0044370038, + -0.027385605, + 0.018681098, + 0.048191037, + 0.059637222, + -0.03564249, + -0.0019521543, + 0.0219619, + 0.010083207, + 0.026848417, + 0.00089960813, + 0.061644834, + -0.021003744, + 0.026093531, + 0.019745339, + -0.0146089345, + -0.015242125, + -0.023996552, + -0.028343257, + -0.009521382, + -0.029578319, + 0.14400594, + 0.015581283, + -0.034467764, + -0.006880407, + -0.009970346, + -0.025298554, + 0.03371621, + 0.014318882, + -0.019764632, + 0.029394012, + -0.027161736, + 0.05766742, + -0.013174107, + 0.01361745, + 0.0518315, + -0.020510731, + -0.038367324, + 0.0054897135, + 0.012048302, + 0.057837225, + 0.0002809129, + 0.01411825, + 0.005755715, + -0.013277922, + 0.040729128, + -0.060171172, + -0.045627464, + 0.09807252, + -0.024581103, + -0.019699901, + 0.006539341, + -0.0028708335, + 0.005088123, + -0.01271195, + -0.007571297, + 0.007648347, + 0.023475781, + -0.045742624, + -0.045924474, + 0.028220603, + -0.025765365, + 0.03592354, + -0.018265394, + 0.04365975, + -0.028916795, + 0.03883419, + -0.004361406, + 0.005958756, + -0.031304177, + -0.0055619157, + -0.043269638, + -0.0023650515, + 0.007091223, + -0.016107671, + -0.0366844, + 0.007879869, + 0.03495698, + 0.0249394, + 0.0061501376, + -0.023060488, + -0.03603689, + 0.014991053, + -0.08503254, + -0.047079965, + -0.030019848, + -0.04917001, + 0.0053022155, + 0.04246746, + 0.015400905, + 0.042199153, + -0.03104176, + 0.0063246605, + 0.013934042, + -0.03693995, + 0.014990398, + 0.045937918, + -0.008848052, + 0.012130271, + 0.012243711, + -0.020704841, + -0.0042310995, + -0.0041251397, + -0.013541171, + 0.031493492, + -0.018749801, + 0.0030738483, + 0.04378173, + -0.038163994, + -0.008642531, + -0.0305042, + -0.04021257, + -0.018450813, + -0.03135143, + 0.013296257, + 0.025800386, + -0.05494155, + -0.012517254, + -0.0090649035, + -0.017260345, + 0.05878396, + 0.013410502, + -0.043225475, + 0.0002207434, + -0.0111124255, + -0.06332898, + 0.006332248, + -0.035152115, + -0.013596385, + -0.03988788, + -0.0017467305, + -0.047944624, + 4.7393946e-06, + -0.023586897, + 0.00044445967, + -0.03773364, + 0.032983948, + -0.027387967, + 0.014769233, + 0.029572468, + 0.018302204, + -0.01802371, + -0.04651166, + 0.018814433, + 0.019259652, + 0.00054817594, + 0.011449949, + -0.045078974, + 0.0006457672, + -0.053020664, + -0.0231668, + 0.014171299, + 0.006371779, + 0.022455387, + -0.0058859503, + -0.016131831, + 0.063288294, + -0.041467346, + 0.016419899, + 0.0449162, + 0.022371383, + 0.030934192, + 0.01958713, + 0.0034458376, + 0.007896594, + -0.041903246, + -0.07885942, + -0.0062535186, + 0.037036378, + -0.015698483, + 0.0031851658, + 0.03698736, + -0.0034287323, + 0.057788305, + -0.004490319, + -0.016333936, + -0.01616403, + -0.018075457, + 0.038575064, + -0.04125684, + 0.020682124, + 0.059820678, + 0.03583978, + 0.04042488, + -0.010756013, + -0.010794641, + 0.015102441, + 0.010976761, + -0.029726021, + 0.028498048, + 0.0075484235, + -0.064335965, + 0.056632347, + -0.029801186, + -0.027019715, + -0.036960963, + 0.012310944, + -0.042235516, + -0.001544881, + -0.014797979, + 0.052466325, + -0.00024286266, + -0.03754242, + -0.015421819, + 
0.003534513, + 0.06266017, + 0.0046598907, + 0.0014978345, + -0.06921345, + -0.08720752, + -0.07460715, + 0.018168034, + -0.010298518, + 0.035470948, + 0.027449265, + 0.059473775, + 0.047745705, + 0.023954853, + -0.07465851, + -0.0008280701, + 0.013957919, + -0.015527039, + 0.06325239, + 0.03698926, + 0.03978882, + -0.025689382, + 0.10221269, + 0.08092678, + -0.0019784777, + -0.0030553392, + 0.042616755, + 0.008439228, + 0.025174139, + -0.013808177, + -0.027050078, + -0.03330378, + -0.013690383, + 0.031109717, + -0.01655102, + 0.042509243, + 0.025645396, + 0.01402567, + -0.042015504, + -0.049581204, + 0.023375591, + -0.078371555, + 0.07512955, + 0.027381487, + 0.00063200365, + -0.0029287962, + 0.04701604, + 0.02639058, + 0.011139746, + 0.04040883, + -0.0071441066, + -0.0056353174, + -0.074339435, + -0.026178142, + 0.08239294, + -0.0037761934, + 0.0183341, + -0.025514184, + -0.019294523, + -0.031538356, + 0.056522004, + -0.026346192, + -0.02721649, + -0.011004155, + 0.0014263233, + -0.04426181, + 0.011661826, + -0.050124433, + 0.02323837, + -0.040722184, + 0.010695218, + 0.07903897, + -0.033937648, + 0.05980606, + 0.02400962, + 0.032865368, + -0.011959509, + -0.0031907223, + 0.0064875074, + 0.00028192427, + -0.034210965, + -0.012334535, + 0.0370763, + 0.03755404, + 0.014202811, + 0.06844249, + 0.047826856, + 0.024290472, + -0.03599299, + -0.034226857, + -0.010420723, + 0.009456614, + 0.03894145, + -0.007944157, + -0.013756447, + -0.00028296094, + -0.04642981, + -0.060828708, + 0.02868708, + 0.009584524, + 0.013988791, + -0.021147093, + 0.024150442, + -0.0026663612, + -0.044277743, + 0.03254617, + -0.013576191, + -0.008511846, + 0.0019493122, + -0.027675934, + -0.015192746, + 0.008880871, + -0.043167602, + 0.02659629, + -0.020771017, + -0.012428427, + 0.0021467921, + -0.009742878, + 0.002719498, + 0.057403937, + -0.00014457622, + -0.027382646, + 0.005770138, + -0.05894638, + -0.0128830215, + 0.04935907, + 0.0014768047, + 0.0110171735, + 0.00015632634, + 0.058845997, + 0.11715432, + 0.006725901, + 0.016365116, + 0.015296825, + 0.009938535, + 0.0054548862, + 0.00079685776, + -0.07801037, + -0.03931397, + -0.038229417 + ], + "index": 0, + "object": "embedding" + } + ], + "model": "nomic-embed-text:137m-v1.5-fp16", + "object": "list", + "usage": { + "prompt_tokens": 2, + "total_tokens": 2 + } + } + }, + "is_streaming": false + } +} diff --git a/tests/integration/recordings/responses/ed9e9b34008d.json b/tests/integration/recordings/responses/ed9e9b34008d.json deleted file mode 100644 index d0591dbc1..000000000 --- a/tests/integration/recordings/responses/ed9e9b34008d.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the largest planet in our solar system?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:39:48.030217Z", - "done": true, - "done_reason": "stop", - "total_duration": 9760536750, - "load_duration": 242188583, - "prompt_eval_count": 26, - "prompt_eval_duration": 83819333, - "eval_count": 
232, - "eval_duration": 9434009042, - "response": "The largest planet in our solar system is Jupiter. It is a gas giant, meaning it is primarily composed of hydrogen and helium gases. Jupiter has a diameter of approximately 142,984 kilometers (88,846 miles), which is more than 11 times the diameter of Earth.\n\nJupiter is not only the largest planet in terms of size, but also the most massive planet in our solar system, with a mass that is more than 318 times that of Earth. It has a thick atmosphere and a strong magnetic field, and is known for its distinctive banded appearance, which is caused by strong winds in the upper atmosphere.\n\nJupiter's massive size and gravitational pull have a significant impact on the surrounding space, including the orbits of nearby planets and asteroids. Its moons are also notable, with four large ones: Io, Europa, Ganymede, and Callisto, which are known as the Galilean moons due to their discovery by Galileo Galilei in 1610.\n\nJupiter is a fascinating planet that continues to be studied by astronomers and space agencies around the world, offering insights into the formation and evolution of our solar system.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/eee47930e3ae.json b/tests/integration/recordings/responses/eee47930e3ae.json index 283416a09..086ce18f8 100644 --- a/tests/integration/recordings/responses/eee47930e3ae.json +++ b/tests/integration/recordings/responses/eee47930e3ae.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.631107Z", + "created_at": "2025-10-01T01:36:34.037711241Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.673105Z", + "created_at": "2025-10-01T01:36:34.234670218Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.714459Z", + "created_at": "2025-10-01T01:36:34.430073402Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.755882Z", + "created_at": "2025-10-01T01:36:34.629562851Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.797494Z", + "created_at": "2025-10-01T01:36:34.828769603Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.839382Z", + "created_at": "2025-10-01T01:36:35.027101431Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.881062Z", + "created_at": "2025-10-01T01:36:35.228873906Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": 
"llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.921976Z", + "created_at": "2025-10-01T01:36:35.429147653Z", "done": false, "done_reason": null, "total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:04.962922Z", + "created_at": "2025-10-01T01:36:35.626756664Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.00411Z", + "created_at": "2025-10-01T01:36:35.822847752Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.04532Z", + "created_at": "2025-10-01T01:36:36.021190515Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.086979Z", + "created_at": "2025-10-01T01:36:36.228035317Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.128195Z", + "created_at": "2025-10-01T01:36:36.424413535Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.169221Z", + "created_at": "2025-10-01T01:36:36.62756048Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.210938Z", + "created_at": "2025-10-01T01:36:36.828422414Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.252232Z", + "created_at": "2025-10-01T01:36:37.033389762Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.293529Z", + "created_at": "2025-10-01T01:36:37.239556153Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.334965Z", + "created_at": "2025-10-01T01:36:37.448526412Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,15 +346,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:38:05.376741Z", + "created_at": "2025-10-01T01:36:37.648660737Z", "done": true, "done_reason": "stop", - "total_duration": 936717042, - "load_duration": 109245542, + "total_duration": 6101960547, + "load_duration": 42550477, "prompt_eval_count": 371, - "prompt_eval_duration": 80430583, + "prompt_eval_duration": 2446898261, "eval_count": 19, - "eval_duration": 746422917, + "eval_duration": 3611916940, "response": "", "thinking": null, "context": null diff --git 
a/tests/integration/recordings/responses/ef757a75ed08.json b/tests/integration/recordings/responses/ef757a75ed08.json deleted file mode 100644 index 05860c4bb..000000000 --- a/tests/integration/recordings/responses/ef757a75ed08.json +++ /dev/null @@ -1,185 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b-instruct-fp16", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\n Returns the boiling point of a liquid in Celsius or Fahrenheit.\n\n :param liquid_name: The name of the liquid\n :param celsius: Whether to return the boiling point in Celsius\n :return: The boiling point of the liquid in Celcius or Fahrenheit\n \",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. 
Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b-instruct-fp16" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.272912Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "[g", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.31501Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "reet", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.356888Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "_every", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.398576Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "one", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.440412Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "(url", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.482165Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "=\"", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.523773Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "world", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.565072Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": 
null, - "response": "\")]", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:34:22.607117Z", - "done": true, - "done_reason": "stop", - "total_duration": 1386049708, - "load_duration": 96970583, - "prompt_eval_count": 456, - "prompt_eval_duration": 952471625, - "eval_count": 9, - "eval_duration": 335924459, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/f3c3afbd9b7e.json b/tests/integration/recordings/responses/f3c3afbd9b7e.json deleted file mode 100644 index a5aecf06f..000000000 --- a/tests/integration/recordings/responses/f3c3afbd9b7e.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:1b", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\nReturns the boiling point of a liquid in Celsius or Fahrenheit.\n\n:param liquid_name: The name of the liquid\n:param celsius: Whether to return the boiling point in Celsius\n:return: The boiling point of the liquid in Celcius or Fahrenheit\n\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. 
Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": true - }, - "endpoint": "/api/generate", - "model": "llama3.2:1b" - }, - "response": { - "body": [ - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:1b", - "created_at": "2025-07-29T23:23:09.553247Z", - "done": false, - "done_reason": null, - "total_duration": null, - "load_duration": null, - "prompt_eval_count": null, - "prompt_eval_duration": null, - "eval_count": null, - "eval_duration": null, - "response": "Hi", - "thinking": null, - "context": null - } - }, - { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:1b", - "created_at": "2025-07-29T23:23:09.564069Z", - "done": true, - "done_reason": "stop", - "total_duration": 2125493250, - "load_duration": 1610279708, - "prompt_eval_count": 448, - "prompt_eval_duration": 502413125, - "eval_count": 2, - "eval_duration": 11573709, - "response": "", - "thinking": null, - "context": null - } - } - ], - "is_streaming": true - } -} diff --git a/tests/integration/recordings/responses/f477c2fe1332.json b/tests/integration/recordings/responses/f477c2fe1332.json index d3c8e7176..bd5488354 100644 --- a/tests/integration/recordings/responses/f477c2fe1332.json +++ b/tests/integration/recordings/responses/f477c2fe1332.json @@ -22,7 +22,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.583665Z", + "created_at": "2025-10-01T01:38:14.816773611Z", "done": false, "done_reason": null, "total_duration": null, @@ -40,7 +40,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.625653Z", + "created_at": "2025-10-01T01:38:15.015836301Z", "done": false, "done_reason": null, "total_duration": null, @@ -58,7 +58,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.667189Z", + "created_at": "2025-10-01T01:38:15.213696526Z", "done": false, "done_reason": null, "total_duration": null, @@ -76,7 +76,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.708905Z", + "created_at": "2025-10-01T01:38:15.414929406Z", "done": false, "done_reason": null, "total_duration": null, @@ -94,7 +94,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.751003Z", + "created_at": "2025-10-01T01:38:15.611961584Z", "done": false, "done_reason": null, "total_duration": null, @@ -112,7 +112,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.792516Z", + "created_at": "2025-10-01T01:38:15.810925669Z", "done": false, "done_reason": null, "total_duration": null, @@ -130,7 +130,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.834194Z", + "created_at": "2025-10-01T01:38:16.024560322Z", "done": false, "done_reason": null, "total_duration": null, @@ -148,7 +148,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.878321Z", + "created_at": "2025-10-01T01:38:16.221109927Z", "done": false, "done_reason": null, 
"total_duration": null, @@ -166,7 +166,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.921552Z", + "created_at": "2025-10-01T01:38:16.417436307Z", "done": false, "done_reason": null, "total_duration": null, @@ -184,7 +184,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:31.963105Z", + "created_at": "2025-10-01T01:38:16.617952673Z", "done": false, "done_reason": null, "total_duration": null, @@ -202,7 +202,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.005494Z", + "created_at": "2025-10-01T01:38:16.813239478Z", "done": false, "done_reason": null, "total_duration": null, @@ -220,7 +220,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.047231Z", + "created_at": "2025-10-01T01:38:17.014012745Z", "done": false, "done_reason": null, "total_duration": null, @@ -238,7 +238,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.089031Z", + "created_at": "2025-10-01T01:38:17.21415578Z", "done": false, "done_reason": null, "total_duration": null, @@ -256,7 +256,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.130704Z", + "created_at": "2025-10-01T01:38:17.411442027Z", "done": false, "done_reason": null, "total_duration": null, @@ -274,7 +274,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.172183Z", + "created_at": "2025-10-01T01:38:17.610203746Z", "done": false, "done_reason": null, "total_duration": null, @@ -292,7 +292,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.21392Z", + "created_at": "2025-10-01T01:38:17.806756435Z", "done": false, "done_reason": null, "total_duration": null, @@ -310,7 +310,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.255392Z", + "created_at": "2025-10-01T01:38:18.009202601Z", "done": false, "done_reason": null, "total_duration": null, @@ -328,7 +328,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.297249Z", + "created_at": "2025-10-01T01:38:18.204934978Z", "done": false, "done_reason": null, "total_duration": null, @@ -346,7 +346,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.341358Z", + "created_at": "2025-10-01T01:38:18.402371167Z", "done": false, "done_reason": null, "total_duration": null, @@ -364,7 +364,7 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.384155Z", + "created_at": "2025-10-01T01:38:18.598001673Z", "done": false, "done_reason": null, "total_duration": null, @@ -382,15 +382,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama3.2:3b-instruct-fp16", - "created_at": "2025-09-03T17:42:32.426441Z", + "created_at": "2025-10-01T01:38:18.795317047Z", "done": true, 
"done_reason": "stop", - "total_duration": 1659557917, - "load_duration": 75341875, + "total_duration": 36201749155, + "load_duration": 41187586, "prompt_eval_count": 375, - "prompt_eval_duration": 740178250, + "prompt_eval_duration": 32180468680, "eval_count": 21, - "eval_duration": 843394541, + "eval_duration": 3979448369, "response": "", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/f6857bcea729.json b/tests/integration/recordings/responses/f6857bcea729.json deleted file mode 100644 index 404bfb987..000000000 --- a/tests/integration/recordings/responses/f6857bcea729.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 2<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b", - "created_at": "2025-08-11T15:56:13.082679Z", - "done": true, - "done_reason": "stop", - "total_duration": 2606245291, - "load_duration": 9979708, - "prompt_eval_count": 21, - "prompt_eval_duration": 23000000, - "eval_count": 321, - "eval_duration": 2572000000, - "response": "Here are some test metrics that can be used to evaluate the performance of a system:\n\n1. **Accuracy**: Measures how close the predicted values are to the actual values.\n2. **Precision**: Measures the proportion of true positives among all positive predictions made by the model.\n3. **Recall**: Measures the proportion of true positives among all actual positive instances.\n4. **F1-score**: The harmonic mean of precision and recall, providing a balanced measure of both.\n5. **Mean Squared Error (MSE)**: Measures the average squared difference between predicted and actual values.\n6. **Mean Absolute Error (MAE)**: Measures the average absolute difference between predicted and actual values.\n7. **Root Mean Squared Percentage Error (RMSPE)**: A variation of MSE that expresses errors as a percentage of the actual value.\n8. **Coefficient of Determination (R-squared, R2)**: Measures how well the model explains the variance in the data.\n9. **Mean Absolute Percentage Error (MAPE)**: Measures the average absolute percentage difference between predicted and actual values.\n10. 
**Mean Squared Logarithmic Error (MSLE)**: A variation of MSE that is more suitable for skewed distributions.\n\nThese metrics can be used to evaluate different aspects of a system's performance, such as:\n\n* Classification models: accuracy, precision, recall, F1-score\n* Regression models: MSE, MAE, RMSPE, R2\n* Time series forecasting: MAPE, MSLE\n\nNote that the choice of metric depends on the specific problem and data.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/f80b99430f7e.json b/tests/integration/recordings/responses/f80b99430f7e.json deleted file mode 100644 index 5b692f4ca..000000000 --- a/tests/integration/recordings/responses/f80b99430f7e.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "request": { - "method": "POST", - "url": "http://localhost:11434/api/generate", - "headers": {}, - "body": { - "model": "llama3.2:3b", - "raw": true, - "prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", - "options": { - "temperature": 0.0 - }, - "stream": false - }, - "endpoint": "/api/generate", - "model": "llama3.2:3b" - }, - "response": { - "body": { - "__type__": "ollama._types.GenerateResponse", - "__data__": { - "model": "llama3.2:3b", - "created_at": "2025-08-11T15:56:10.465932Z", - "done": true, - "done_reason": "stop", - "total_duration": 3745686709, - "load_duration": 9734584, - "prompt_eval_count": 21, - "prompt_eval_duration": 23000000, - "eval_count": 457, - "eval_duration": 3712000000, - "response": "Here are some test metrics that can be used to evaluate the performance of a system:\n\n**Primary Metrics**\n\n1. **Response Time**: The time it takes for the system to respond to a request.\n2. **Throughput**: The number of requests processed by the system per unit time (e.g., requests per second).\n3. **Error Rate**: The percentage of requests that result in an error.\n\n**Secondary Metrics**\n\n1. **Average Response Time**: The average response time for all requests.\n2. **Median Response Time**: The middle value of the response times, used to detect outliers.\n3. **99th Percentile Response Time**: The response time at which 99% of requests are completed within this time.\n4. **Request Latency**: The difference between the request arrival time and the response time.\n\n**User Experience Metrics**\n\n1. **User Satisfaction (USAT)**: Measured through surveys or feedback forms to gauge user satisfaction with the system's performance.\n2. **First Response Time**: The time it takes for a user to receive their first response from the system.\n3. **Time Spent in System**: The total amount of time a user spends interacting with the system.\n\n**System Resource Metrics**\n\n1. **CPU Utilization**: The percentage of CPU resources being used by the system.\n2. **Memory Usage**: The amount of memory being used by the system.\n3. **Disk I/O Wait Time**: The average time spent waiting for disk I/O operations to complete.\n\n**Security Metrics**\n\n1. **Authentication Success Rate**: The percentage of successful authentication attempts.\n2. **Authorization Success Rate**: The percentage of successful authorization attempts.\n3. **Error Rate (Security)**: The percentage of security-related errors.\n\n**Other Metrics**\n\n1. **Page Load Time**: The time it takes for a page to load.\n2. 
**Click-Through Rate (CTR)**: The percentage of users who click on a link or button after seeing an ad or notification.\n3. **Conversion Rate**: The percentage of users who complete a desired action (e.g., fill out a form, make a purchase).\n\nThese metrics can be used to evaluate the performance and effectiveness of various aspects of your system, from user experience to security and resource utilization.", - "thinking": null, - "context": null - } - }, - "is_streaming": false - } -} diff --git a/tests/integration/recordings/responses/fcdef245da95.json b/tests/integration/recordings/responses/fcdef245da95.json index d2801b9c6..0246d3481 100644 --- a/tests/integration/recordings/responses/fcdef245da95.json +++ b/tests/integration/recordings/responses/fcdef245da95.json @@ -20,15 +20,15 @@ "__type__": "ollama._types.GenerateResponse", "__data__": { "model": "llama-guard3:1b", - "created_at": "2025-09-03T17:37:44.986629Z", + "created_at": "2025-09-30T17:37:16.577132681Z", "done": true, "done_reason": "stop", - "total_duration": 285693167, - "load_duration": 110888542, + "total_duration": 4644975499, + "load_duration": 1639168216, "prompt_eval_count": 212, - "prompt_eval_duration": 163158250, + "prompt_eval_duration": 2946622894, "eval_count": 2, - "eval_duration": 11080125, + "eval_duration": 58451208, "response": "safe", "thinking": null, "context": null diff --git a/tests/integration/recordings/responses/models-bd032f995f2a-abd54ea0.json b/tests/integration/recordings/responses/models-bd032f995f2a-abd54ea0.json new file mode 100644 index 000000000..ad363fa2f --- /dev/null +++ b/tests/integration/recordings/responses/models-bd032f995f2a-abd54ea0.json @@ -0,0 +1,42 @@ +{ + "request": { + "method": "POST", + "url": "http://0.0.0.0:11434/v1/v1/models", + "headers": {}, + "body": {}, + "endpoint": "/v1/models", + "model": "" + }, + "response": { + "body": [ + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "llama-guard3:1b", + "created": 1753937098, + "object": "model", + "owned_by": "library" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "all-minilm:l6-v2", + "created": 1753936935, + "object": "model", + "owned_by": "library" + } + }, + { + "__type__": "openai.types.model.Model", + "__data__": { + "id": "llama3.2:3b-instruct-fp16", + "created": 1753936925, + "object": "model", + "owned_by": "library" + } + } + ], + "is_streaming": false + } +} diff --git a/tests/unit/providers/agents/meta_reference/test_openai_responses.py b/tests/unit/providers/agents/meta_reference/test_openai_responses.py index 5e5914a03..5ddc1bda8 100644 --- a/tests/unit/providers/agents/meta_reference/test_openai_responses.py +++ b/tests/unit/providers/agents/meta_reference/test_openai_responses.py @@ -327,6 +327,132 @@ async def test_create_openai_response_with_tool_call_type_none(openai_responses_ assert chunks[5].response.output[0].name == "get_weather" +async def test_create_openai_response_with_tool_call_function_arguments_none(openai_responses_impl, mock_inference_api): + """Test creating an OpenAI response with a tool call response that has a function that does not accept arguments, or arguments set to None when they are not mandatory.""" + # Setup + input_text = "What is the time right now?" 
+ model = "meta-llama/Llama-3.1-8B-Instruct" + + async def fake_stream_toolcall(): + yield ChatCompletionChunk( + id="123", + choices=[ + Choice( + index=0, + delta=ChoiceDelta( + tool_calls=[ + ChoiceDeltaToolCall( + index=0, + id="tc_123", + function=ChoiceDeltaToolCallFunction(name="get_current_time", arguments=None), + type=None, + ) + ] + ), + ), + ], + created=1, + model=model, + object="chat.completion.chunk", + ) + + mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall() + + # Function does not accept arguments + result = await openai_responses_impl.create_openai_response( + input=input_text, + model=model, + stream=True, + temperature=0.1, + tools=[ + OpenAIResponseInputToolFunction( + name="get_current_time", + description="Get current time for system's timezone", + parameters={}, + ) + ], + ) + + # Check that we got the content from our mocked tool execution result + chunks = [chunk async for chunk in result] + + # Verify event types + # Should have: response.created, output_item.added, function_call_arguments.delta, + # function_call_arguments.done, output_item.done, response.completed + assert len(chunks) == 5 + + # Verify inference API was called correctly (after iterating over result) + first_call = mock_inference_api.openai_chat_completion.call_args_list[0] + assert first_call.kwargs["messages"][0].content == input_text + assert first_call.kwargs["tools"] is not None + assert first_call.kwargs["temperature"] == 0.1 + + # Check response.created event (should have empty output) + assert chunks[0].type == "response.created" + assert len(chunks[0].response.output) == 0 + + # Check streaming events + assert chunks[1].type == "response.output_item.added" + assert chunks[2].type == "response.function_call_arguments.done" + assert chunks[3].type == "response.output_item.done" + + # Check response.completed event (should have the tool call with arguments set to "{}") + assert chunks[4].type == "response.completed" + assert len(chunks[4].response.output) == 1 + assert chunks[4].response.output[0].type == "function_call" + assert chunks[4].response.output[0].name == "get_current_time" + assert chunks[4].response.output[0].arguments == "{}" + + mock_inference_api.openai_chat_completion.return_value = fake_stream_toolcall() + + # Function accepts optional arguments + result = await openai_responses_impl.create_openai_response( + input=input_text, + model=model, + stream=True, + temperature=0.1, + tools=[ + OpenAIResponseInputToolFunction( + name="get_current_time", + description="Get current time for system's timezone", + parameters={ + "timezone": "string", + }, + ) + ], + ) + + # Check that we got the content from our mocked tool execution result + chunks = [chunk async for chunk in result] + + # Verify event types + # Should have: response.created, output_item.added, function_call_arguments.delta, + # function_call_arguments.done, output_item.done, response.completed + assert len(chunks) == 5 + + # Verify inference API was called correctly (after iterating over result) + first_call = mock_inference_api.openai_chat_completion.call_args_list[0] + assert first_call.kwargs["messages"][0].content == input_text + assert first_call.kwargs["tools"] is not None + assert first_call.kwargs["temperature"] == 0.1 + + # Check response.created event (should have empty output) + assert chunks[0].type == "response.created" + assert len(chunks[0].response.output) == 0 + + # Check streaming events + assert chunks[1].type == "response.output_item.added" + assert chunks[2].type 
== "response.function_call_arguments.done" + assert chunks[3].type == "response.output_item.done" + + # Check response.completed event (should have the tool call with arguments set to "{}") + assert chunks[4].type == "response.completed" + assert len(chunks[4].response.output) == 1 + assert chunks[4].response.output[0].type == "function_call" + assert chunks[4].response.output[0].name == "get_current_time" + assert chunks[4].response.output[0].arguments == "{}" + + async def test_create_openai_response_with_multiple_messages(openai_responses_impl, mock_inference_api): """Test creating an OpenAI response with multiple messages.""" # Setup diff --git a/tests/unit/providers/inference/test_openai_base_url_config.py b/tests/unit/providers/inference/test_openai_base_url_config.py index 903772f0c..7c5a5b327 100644 --- a/tests/unit/providers/inference/test_openai_base_url_config.py +++ b/tests/unit/providers/inference/test_openai_base_url_config.py @@ -19,6 +19,7 @@ class TestOpenAIBaseURLConfig: """Test that the adapter uses the default OpenAI base URL when no environment variable is set.""" config = OpenAIConfig(api_key="test-key") adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test assert adapter.get_base_url() == "https://api.openai.com/v1" @@ -27,6 +28,7 @@ class TestOpenAIBaseURLConfig: custom_url = "https://custom.openai.com/v1" config = OpenAIConfig(api_key="test-key", base_url=custom_url) adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test assert adapter.get_base_url() == custom_url @@ -38,6 +40,7 @@ class TestOpenAIBaseURLConfig: processed_config = replace_env_vars(config_data) config = OpenAIConfig.model_validate(processed_config) adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test assert adapter.get_base_url() == "https://env.openai.com/v1" @@ -47,6 +50,7 @@ class TestOpenAIBaseURLConfig: custom_url = "https://config.openai.com/v1" config = OpenAIConfig(api_key="test-key", base_url=custom_url) adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test # Config should take precedence over environment variable assert adapter.get_base_url() == custom_url @@ -57,6 +61,7 @@ class TestOpenAIBaseURLConfig: custom_url = "https://test.openai.com/v1" config = OpenAIConfig(api_key="test-key", base_url=custom_url) adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test # Mock the get_api_key method since it's delegated to LiteLLMOpenAIMixin adapter.get_api_key = MagicMock(return_value="test-key") @@ -76,6 +81,7 @@ class TestOpenAIBaseURLConfig: custom_url = "https://test.openai.com/v1" config = OpenAIConfig(api_key="test-key", base_url=custom_url) adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test # Mock the get_api_key method adapter.get_api_key = MagicMock(return_value="test-key") @@ -117,6 +123,7 @@ class TestOpenAIBaseURLConfig: processed_config = replace_env_vars(config_data) config = OpenAIConfig.model_validate(processed_config) adapter = OpenAIInferenceAdapter(config) + adapter.provider_data_api_key_field = None # Disable provider data for this test # Mock the get_api_key method adapter.get_api_key = MagicMock(return_value="test-key") diff --git 
a/tests/unit/providers/utils/inference/test_openai_mixin.py b/tests/unit/providers/utils/inference/test_openai_mixin.py index b55f206b9..8ef7ec81c 100644 --- a/tests/unit/providers/utils/inference/test_openai_mixin.py +++ b/tests/unit/providers/utils/inference/test_openai_mixin.py @@ -4,18 +4,20 @@ # This source code is licensed under the terms described in the LICENSE file in # the root directory of this source tree. -from unittest.mock import AsyncMock, MagicMock, PropertyMock, patch +import json +from unittest.mock import AsyncMock, MagicMock, Mock, PropertyMock, patch import pytest +from pydantic import BaseModel, Field from llama_stack.apis.inference import Model, OpenAIUserMessageParam from llama_stack.apis.models import ModelType +from llama_stack.core.request_headers import request_provider_data_context from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin class OpenAIMixinImpl(OpenAIMixin): - def __init__(self): - self.__provider_id__ = "test-provider" + __provider_id__: str = "test-provider" def get_api_key(self) -> str: raise NotImplementedError("This method should be mocked in tests") @@ -24,7 +26,7 @@ class OpenAIMixinImpl(OpenAIMixin): raise NotImplementedError("This method should be mocked in tests") -class OpenAIMixinWithEmbeddingsImpl(OpenAIMixin): +class OpenAIMixinWithEmbeddingsImpl(OpenAIMixinImpl): """Test implementation with embedding model metadata""" embedding_model_metadata = { @@ -32,14 +34,6 @@ class OpenAIMixinWithEmbeddingsImpl(OpenAIMixin): "text-embedding-ada-002": {"embedding_dimension": 1536, "context_length": 8192}, } - __provider_id__ = "test-provider" - - def get_api_key(self) -> str: - raise NotImplementedError("This method should be mocked in tests") - - def get_base_url(self) -> str: - raise NotImplementedError("This method should be mocked in tests") - @pytest.fixture def mixin(): @@ -366,3 +360,78 @@ class TestOpenAIMixinAllowedModels: assert await mixin.check_model_availability("final-mock-model-id") assert not await mixin.check_model_availability("some-mock-model-id") assert not await mixin.check_model_availability("another-mock-model-id") + + +class ProviderDataValidator(BaseModel): + """Validator for provider data in tests""" + + test_api_key: str | None = Field(default=None) + + +class OpenAIMixinWithProviderData(OpenAIMixinImpl): + """Test implementation that supports provider data API key field""" + + provider_data_api_key_field: str = "test_api_key" + + def get_api_key(self) -> str: + return "default-api-key" + + def get_base_url(self): + return "default-base-url" + + +class TestOpenAIMixinProviderDataApiKey: + """Test cases for provider_data_api_key_field functionality""" + + @pytest.fixture + def mixin_with_provider_data_field(self): + """Mixin instance with provider_data_api_key_field set""" + mixin_instance = OpenAIMixinWithProviderData() + + # Mock provider_spec for provider data validation + mock_provider_spec = MagicMock() + mock_provider_spec.provider_type = "test-provider-with-data" + mock_provider_spec.provider_data_validator = ( + "tests.unit.providers.utils.inference.test_openai_mixin.ProviderDataValidator" + ) + mixin_instance.__provider_spec__ = mock_provider_spec + + return mixin_instance + + @pytest.fixture + def mixin_with_provider_data_field_and_none_api_key(self, mixin_with_provider_data_field): + mixin_with_provider_data_field.get_api_key = Mock(return_value=None) + return mixin_with_provider_data_field + + def test_no_provider_data(self, mixin_with_provider_data_field): + """Test that client uses 
config API key when no provider data is available""" + assert mixin_with_provider_data_field.client.api_key == "default-api-key" + + def test_with_provider_data(self, mixin_with_provider_data_field): + """Test that provider data API key overrides config API key""" + with request_provider_data_context( + {"x-llamastack-provider-data": json.dumps({"test_api_key": "provider-data-key"})} + ): + assert mixin_with_provider_data_field.client.api_key == "provider-data-key" + + def test_with_wrong_key(self, mixin_with_provider_data_field): + """Test fallback to config when provider data doesn't have the required key""" + with request_provider_data_context({"x-llamastack-provider-data": json.dumps({"wrong_key": "some-value"})}): + assert mixin_with_provider_data_field.client.api_key == "default-api-key" + + def test_error_when_no_config_and_provider_data_has_wrong_key( + self, mixin_with_provider_data_field_and_none_api_key + ): + """Test that ValueError is raised when provider data exists but doesn't have required key""" + with request_provider_data_context({"x-llamastack-provider-data": json.dumps({"wrong_key": "some-value"})}): + with pytest.raises(ValueError, match="API key is not set"): + _ = mixin_with_provider_data_field_and_none_api_key.client + + def test_error_message_includes_correct_field_names(self, mixin_with_provider_data_field_and_none_api_key): + """Test that error message includes correct field name and header information""" + with pytest.raises(ValueError) as exc_info: + _ = mixin_with_provider_data_field_and_none_api_key.client + + error_message = str(exc_info.value) + assert "test_api_key" in error_message + assert "x-llamastack-provider-data" in error_message