Merge remote-tracking branch 'upstream/main' into runpod-adapter-fix

This commit is contained in:
Justin 2025-10-01 14:47:33 -07:00
commit 813ff44659
153 changed files with 63378 additions and 51988 deletions

View file

@ -1,6 +1,11 @@
# API Conformance Tests # API Conformance Tests
# This workflow ensures that API changes maintain backward compatibility and don't break existing integrations # This workflow ensures that API changes maintain backward compatibility and don't break existing integrations
# It runs schema validation and OpenAPI diff checks to catch breaking changes early # It runs schema validation and OpenAPI diff checks to catch breaking changes early
#
# The workflow handles both monolithic and split API specifications:
# - If split specs exist (stable/experimental/deprecated), they are stitched together for comparison
# - If only monolithic spec exists, it is used directly
# This allows for clean API organization while maintaining robust conformance testing
name: API Conformance Tests name: API Conformance Tests
@ -11,11 +16,14 @@ on:
branches: [ main ] branches: [ main ]
pull_request: pull_request:
branches: [ main ] branches: [ main ]
types: [opened, synchronize, reopened] types: [opened, synchronize, reopened, edited]
paths: paths:
- 'docs/static/llama-stack-spec.yaml' - 'docs/static/llama-stack-spec.yaml' # Legacy monolithic spec
- 'docs/static/llama-stack-spec.html' - 'docs/static/stable-llama-stack-spec.yaml' # Stable APIs spec
- '.github/workflows/conformance.yml' # This workflow itself - 'docs/static/experimental-llama-stack-spec.yaml' # Experimental APIs spec
- 'docs/static/deprecated-llama-stack-spec.yaml' # Deprecated APIs spec
- 'docs/static/llama-stack-spec.html' # Legacy HTML spec
- '.github/workflows/conformance.yml' # This workflow itself
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }} group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_id || github.ref }}
@ -27,14 +35,31 @@ jobs:
check-schema-compatibility: check-schema-compatibility:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
# Pin actions/checkout to a specific commit so behavior stays consistent
# between local testing with `act` and CI
- name: Checkout PR Code - name: Checkout PR Code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
# Check if we should skip conformance testing due to breaking changes
- name: Check if conformance test should be skipped
id: skip-check
run: |
PR_TITLE="${{ github.event.pull_request.title }}"
# Skip if title contains "!:" indicating breaking change (like "feat!:")
if [[ "$PR_TITLE" == *"!:"* ]]; then
echo "skip=true" >> $GITHUB_OUTPUT
exit 0
fi
# Get all commits in this PR and check for BREAKING CHANGE footer
git log --format="%B" ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }} | \
grep -q "BREAKING CHANGE:" && echo "skip=true" >> $GITHUB_OUTPUT || echo "skip=false" >> $GITHUB_OUTPUT
shell: bash
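For reference, the skip heuristic above reduces to two conventional-commit signals: a "!:" marker in the PR title (as in "feat!:") or a "BREAKING CHANGE:" footer in any commit body. A minimal sketch of the same check (hypothetical helper, not part of the workflow):

```python
def should_skip_conformance(pr_title: str, commit_messages: list[str]) -> bool:
    # "feat!:" style titles mark intentional breaking changes
    if "!:" in pr_title:
        return True
    # A "BREAKING CHANGE:" footer in any commit body also opts out
    return any("BREAKING CHANGE:" in msg for msg in commit_messages)

assert should_skip_conformance("feat!: drop legacy batch API", [])
assert not should_skip_conformance("fix: typo", ["docs: update readme"])
```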
# Checkout the base branch to compare against (usually main) # Checkout the base branch to compare against (usually main)
# This allows us to diff the current changes against the previous state # This allows us to diff the current changes against the previous state
- name: Checkout Base Branch - name: Checkout Base Branch
if: steps.skip-check.outputs.skip != 'true'
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
ref: ${{ github.event.pull_request.base.ref }} ref: ${{ github.event.pull_request.base.ref }}
@ -42,6 +67,7 @@ jobs:
# Cache oasdiff to avoid checksum failures and speed up builds # Cache oasdiff to avoid checksum failures and speed up builds
- name: Cache oasdiff - name: Cache oasdiff
if: steps.skip-check.outputs.skip != 'true'
id: cache-oasdiff id: cache-oasdiff
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830
with: with:
@ -50,20 +76,68 @@ jobs:
# Install oasdiff: https://github.com/oasdiff/oasdiff, a tool for detecting breaking changes in OpenAPI specs. # Install oasdiff: https://github.com/oasdiff/oasdiff, a tool for detecting breaking changes in OpenAPI specs.
- name: Install oasdiff - name: Install oasdiff
if: steps.cache-oasdiff.outputs.cache-hit != 'true' if: steps.skip-check.outputs.skip != 'true' && steps.cache-oasdiff.outputs.cache-hit != 'true'
run: | run: |
curl -fsSL https://raw.githubusercontent.com/oasdiff/oasdiff/main/install.sh | sh curl -fsSL https://raw.githubusercontent.com/oasdiff/oasdiff/main/install.sh | sh
cp /usr/local/bin/oasdiff ~/oasdiff cp /usr/local/bin/oasdiff ~/oasdiff
# Setup cached oasdiff # Setup cached oasdiff
- name: Setup cached oasdiff - name: Setup cached oasdiff
if: steps.cache-oasdiff.outputs.cache-hit == 'true' if: steps.skip-check.outputs.skip != 'true' && steps.cache-oasdiff.outputs.cache-hit == 'true'
run: | run: |
sudo cp ~/oasdiff /usr/local/bin/oasdiff sudo cp ~/oasdiff /usr/local/bin/oasdiff
sudo chmod +x /usr/local/bin/oasdiff sudo chmod +x /usr/local/bin/oasdiff
# Install yq for YAML processing
- name: Install yq
run: |
sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
sudo chmod +x /usr/local/bin/yq
# Verify API specs exist for conformance testing
- name: Check API Specs
run: |
echo "Checking for API specification files..."
# Check current branch
if [ -f "docs/static/stable-llama-stack-spec.yaml" ]; then
echo "✓ Found stable API spec in current branch"
CURRENT_SPEC="docs/static/stable-llama-stack-spec.yaml"
elif [ -f "docs/static/llama-stack-spec.yaml" ]; then
echo "✓ Found monolithic API spec in current branch"
CURRENT_SPEC="docs/static/llama-stack-spec.yaml"
else
echo "❌ No API specs found in current branch"
exit 1
fi
# Check base branch
if [ -f "base/docs/static/stable-llama-stack-spec.yaml" ]; then
echo "✓ Found stable API spec in base branch"
BASE_SPEC="base/docs/static/stable-llama-stack-spec.yaml"
elif [ -f "base/docs/static/llama-stack-spec.yaml" ]; then
echo "✓ Found monolithic API spec in base branch"
BASE_SPEC="base/docs/static/llama-stack-spec.yaml"
else
echo "❌ No API specs found in base branch"
exit 1
fi
# Export for next step
echo "BASE_SPEC=${BASE_SPEC}" >> $GITHUB_ENV
echo "CURRENT_SPEC=${CURRENT_SPEC}" >> $GITHUB_ENV
echo "Will compare: ${BASE_SPEC} -> ${CURRENT_SPEC}"
# Run oasdiff to detect breaking changes in the API specification # Run oasdiff to detect breaking changes in the API specification
# This step will fail if incompatible changes are detected, preventing breaking changes from being merged # This step will fail if incompatible changes are detected, preventing breaking changes from being merged
- name: Run OpenAPI Breaking Change Diff - name: Run OpenAPI Breaking Change Diff
if: steps.skip-check.outputs.skip != 'true'
run: | run: |
oasdiff breaking --fail-on ERR base/docs/static/llama-stack-spec.yaml docs/static/llama-stack-spec.yaml --match-path '^/v1/' oasdiff breaking --fail-on ERR $BASE_SPEC $CURRENT_SPEC --match-path '^/v1/'
# Report when test is skipped
- name: Report skip reason
if: steps.skip-check.outputs.skip == 'true'
run: |
echo "Skipping conformance test: breaking changes indicated in PR title or commit messages"
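The header comment mentions stitching the split specs (stable/experimental/deprecated) together before comparison, which is why yq is installed above; the actual yq invocation is not shown in this hunk. As a rough sketch of the idea only, a merge of the paths and schemas sections could look like this (PyYAML stand-in, file names assumed):

```python
import yaml  # PyYAML

def stitch_specs(spec_paths: list[str]) -> dict:
    """Merge split OpenAPI specs into one document for diffing."""
    merged: dict = {}
    for path in spec_paths:
        with open(path, encoding="utf-8") as fp:
            spec = yaml.safe_load(fp)
        if not merged:
            merged = spec  # first spec supplies info/servers/etc.
        else:
            merged.setdefault("paths", {}).update(spec.get("paths", {}))
            schemas = merged.setdefault("components", {}).setdefault("schemas", {})
            schemas.update(spec.get("components", {}).get("schemas", {}))
    return merged
```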

docs/docs/api-overview.md Normal file
View file

@ -0,0 +1,49 @@
# API Reference Overview
The Llama Stack provides a comprehensive set of APIs organized by stability level to help you choose the right endpoints for your use case.
## 🟢 Stable APIs
**Production-ready APIs with backward compatibility guarantees.**
These APIs are fully tested, documented, and stable. They follow semantic versioning principles and maintain backward compatibility within major versions. Recommended for production applications.
[**Browse Stable APIs →**](./api/llama-stack-specification)
**Key Features:**
- ✅ Backward compatibility guaranteed
- ✅ Comprehensive testing and validation
- ✅ Production-ready reliability
- ✅ Long-term support
---
## 🟡 Experimental APIs
**Preview APIs that may change before becoming stable.**
These APIs include v1alpha and v1beta endpoints that are feature-complete but may undergo changes based on feedback. Great for exploring new capabilities and providing feedback.
[**Browse Experimental APIs →**](./api-experimental/llama-stack-specification-experimental-apis)
**Key Features:**
- 🧪 Latest features and capabilities
- 🧪 May change based on user feedback
- 🧪 Active development and iteration
- 🧪 Opportunity to influence final design
---
## 🔴 Deprecated APIs
**Legacy APIs for migration reference.**
These APIs are deprecated and will be removed in future versions. They are provided for migration purposes and to help transition to newer, stable alternatives.
[**Browse Deprecated APIs →**](./api-deprecated/llama-stack-specification-deprecated-apis)
**Key Features:**
- ⚠️ Will be removed in future versions
- ⚠️ Migration guidance provided
- ⚠️ Use for compatibility during transition
- ⚠️ Not recommended for new projects
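The three tiers map onto URL version prefixes: stable endpoints live under `/v1/`, while experimental ones use `/v1alpha/` or `/v1beta/`. A small sketch of that convention (illustrative routes only):

```python
def classify(route: str) -> str:
    """Classify an endpoint path by its version prefix."""
    version = route.strip("/").split("/", 1)[0]
    if version == "v1":
        return "stable"
    if version in ("v1alpha", "v1beta"):
        return "experimental"
    return "unknown"

print(classify("/v1/models"))       # stable
print(classify("/v1alpha/agents"))  # experimental
```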

View file

@ -1,12 +1,7 @@
--- ---
description: "Agents API for creating and interacting with agentic systems. description: "Agents
Main functionalities provided by this API: APIs for creating and interacting with agentic systems."
- Create agents with specific instructions and ability to use tools.
- Interactions with agents are grouped into sessions (\"threads\"), and each interaction is called a \"turn\".
- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).
- Agents can be provided with various shields (see the Safety API for more details).
- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details."
sidebar_label: Agents sidebar_label: Agents
title: Agents title: Agents
--- ---
@ -15,13 +10,8 @@ title: Agents
## Overview ## Overview
Agents API for creating and interacting with agentic systems. Agents
Main functionalities provided by this API: APIs for creating and interacting with agentic systems.
- Create agents with specific instructions and ability to use tools.
- Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn".
- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).
- Agents can be provided with various shields (see the Safety API for more details).
- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details.
This section contains documentation for all available providers for the **agents** API. This section contains documentation for all available providers for the **agents** API.

View file

@ -55,10 +55,27 @@ const config: Config = {
label: 'Docs', label: 'Docs',
}, },
{ {
type: 'docSidebar', type: 'dropdown',
sidebarId: 'apiSidebar',
position: 'left',
label: 'API Reference', label: 'API Reference',
position: 'left',
to: '/docs/api-overview',
items: [
{
type: 'docSidebar',
sidebarId: 'stableApiSidebar',
label: '🟢 Stable APIs',
},
{
type: 'docSidebar',
sidebarId: 'experimentalApiSidebar',
label: '🟡 Experimental APIs',
},
{
type: 'docSidebar',
sidebarId: 'deprecatedApiSidebar',
label: '🔴 Deprecated APIs',
},
],
}, },
{ {
href: 'https://github.com/llamastack/llama-stack', href: 'https://github.com/llamastack/llama-stack',
@ -83,7 +100,7 @@ const config: Config = {
}, },
{ {
label: 'API Reference', label: 'API Reference',
to: '/docs/api/llama-stack-specification', to: '/docs/api-overview',
}, },
], ],
}, },
@ -170,7 +187,7 @@ const config: Config = {
id: "openapi", id: "openapi",
docsPluginId: "classic", docsPluginId: "classic",
config: { config: {
llamastack: { stable: {
specPath: "static/llama-stack-spec.yaml", specPath: "static/llama-stack-spec.yaml",
outputDir: "docs/api", outputDir: "docs/api",
downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/llama-stack-spec.yaml", downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/llama-stack-spec.yaml",
@ -179,6 +196,24 @@ const config: Config = {
categoryLinkSource: "tag", categoryLinkSource: "tag",
}, },
} satisfies OpenApiPlugin.Options, } satisfies OpenApiPlugin.Options,
experimental: {
specPath: "static/experimental-llama-stack-spec.yaml",
outputDir: "docs/api-experimental",
downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/experimental-llama-stack-spec.yaml",
sidebarOptions: {
groupPathsBy: "tag",
categoryLinkSource: "tag",
},
} satisfies OpenApiPlugin.Options,
deprecated: {
specPath: "static/deprecated-llama-stack-spec.yaml",
outputDir: "docs/api-deprecated",
downloadUrl: "https://raw.githubusercontent.com/meta-llama/llama-stack/main/docs/static/deprecated-llama-stack-spec.yaml",
sidebarOptions: {
groupPathsBy: "tag",
categoryLinkSource: "tag",
},
} satisfies OpenApiPlugin.Options,
} satisfies Plugin.PluginOptions, } satisfies Plugin.PluginOptions,
}, },
], ],

View file

@ -34,40 +34,52 @@ def str_presenter(dumper, data):
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style) return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style)
def main(output_dir: str): def generate_spec(output_dir: Path, stability_filter: str = None, main_spec: bool = False):
output_dir = Path(output_dir) """Generate OpenAPI spec with optional stability filtering."""
if not output_dir.exists():
raise ValueError(f"Directory {output_dir} does not exist")
# Validate API protocols before generating spec if stability_filter:
return_type_errors = validate_api() title_suffix = {
if return_type_errors: "stable": " - Stable APIs" if not main_spec else "",
print("\nAPI Method Return Type Validation Errors:\n") "experimental": " - Experimental APIs",
for error in return_type_errors: "deprecated": " - Deprecated APIs"
print(error, file=sys.stderr) }.get(stability_filter, f" - {stability_filter.title()} APIs")
sys.exit(1)
now = str(datetime.now()) # Use main spec filename for stable when main_spec=True
print( if main_spec and stability_filter == "stable":
"Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at " + now filename_prefix = ""
) else:
print("") filename_prefix = f"{stability_filter}-"
description_suffix = {
"stable": "\n\n**✅ STABLE**: Production-ready APIs with backward compatibility guarantees.",
"experimental": "\n\n**🧪 EXPERIMENTAL**: Pre-release APIs (v1alpha, v1beta) that may change before becoming stable.",
"deprecated": "\n\n**⚠️ DEPRECATED**: Legacy APIs that may be removed in future versions. Use for migration reference only."
}.get(stability_filter, "")
else:
title_suffix = ""
filename_prefix = ""
description_suffix = ""
spec = Specification( spec = Specification(
LlamaStack, LlamaStack,
Options( Options(
server=Server(url="http://any-hosted-llama-stack.com"), server=Server(url="http://any-hosted-llama-stack.com"),
info=Info( info=Info(
title="Llama Stack Specification", title=f"Llama Stack Specification{title_suffix}",
version=LLAMA_STACK_API_V1, version=LLAMA_STACK_API_V1,
description="""This is the specification of the Llama Stack that provides description=f"""This is the specification of the Llama Stack that provides
a set of endpoints and their corresponding interfaces that are tailored to a set of endpoints and their corresponding interfaces that are tailored to
best leverage Llama Models.""", best leverage Llama Models.{description_suffix}""",
), ),
include_standard_error_responses=True, include_standard_error_responses=True,
stability_filter=stability_filter, # Pass the filter to the generator
), ),
) )
with open(output_dir / "llama-stack-spec.yaml", "w", encoding="utf-8") as fp: yaml_filename = f"{filename_prefix}llama-stack-spec.yaml"
html_filename = f"{filename_prefix}llama-stack-spec.html"
with open(output_dir / yaml_filename, "w", encoding="utf-8") as fp:
y = yaml.YAML() y = yaml.YAML()
y.default_flow_style = False y.default_flow_style = False
y.block_seq_indent = 2 y.block_seq_indent = 2
@ -83,9 +95,36 @@ def main(output_dir: str):
fp, fp,
) )
with open(output_dir / "llama-stack-spec.html", "w") as fp: with open(output_dir / html_filename, "w") as fp:
spec.write_html(fp, pretty_print=True) spec.write_html(fp, pretty_print=True)
print(f"Generated {yaml_filename} and {html_filename}")
def main(output_dir: str):
output_dir = Path(output_dir)
if not output_dir.exists():
raise ValueError(f"Directory {output_dir} does not exist")
# Validate API protocols before generating spec
return_type_errors = validate_api()
if return_type_errors:
print("\nAPI Method Return Type Validation Errors:\n")
for error in return_type_errors:
print(error, file=sys.stderr)
sys.exit(1)
now = str(datetime.now())
print(f"Converting the spec to YAML (openapi.yaml) and HTML (openapi.html) at {now}")
print("")
# Generate main spec as stable APIs (llama-stack-spec.yaml)
print("Generating main specification (stable APIs)...")
generate_spec(output_dir, "stable", main_spec=True)
print("Generating other stability-filtered specifications...")
generate_spec(output_dir, "experimental")
generate_spec(output_dir, "deprecated")
if __name__ == "__main__": if __name__ == "__main__":
fire.Fire(main) fire.Fire(main)
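The file-naming rule above is worth calling out: the stable pass doubles as the main spec, so only the experimental and deprecated outputs get a prefix. Restated as a standalone sketch of the `filename_prefix` logic:

```python
def spec_filenames(stability: str, main_spec: bool = False) -> tuple[str, str]:
    """Mirror of the filename_prefix logic in generate_spec()."""
    prefix = "" if (main_spec and stability == "stable") else f"{stability}-"
    return f"{prefix}llama-stack-spec.yaml", f"{prefix}llama-stack-spec.html"

assert spec_filenames("stable", main_spec=True)[0] == "llama-stack-spec.yaml"
assert spec_filenames("experimental")[0] == "experimental-llama-stack-spec.yaml"
assert spec_filenames("deprecated")[1] == "deprecated-llama-stack-spec.html"
```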

View file

@ -7,13 +7,14 @@
import hashlib import hashlib
import inspect import inspect
import ipaddress import ipaddress
import os
import types import types
import typing import typing
from dataclasses import make_dataclass from dataclasses import make_dataclass
from pathlib import Path
from typing import Annotated, Any, Dict, get_args, get_origin, Set, Union from typing import Annotated, Any, Dict, get_args, get_origin, Set, Union
from fastapi import UploadFile from fastapi import UploadFile
from pydantic import BaseModel
from llama_stack.apis.datatypes import Error from llama_stack.apis.datatypes import Error
from llama_stack.strong_typing.core import JsonType from llama_stack.strong_typing.core import JsonType
@ -35,6 +36,7 @@ from llama_stack.strong_typing.schema import (
SchemaOptions, SchemaOptions,
) )
from llama_stack.strong_typing.serialization import json_dump_string, object_to_json from llama_stack.strong_typing.serialization import json_dump_string, object_to_json
from pydantic import BaseModel
from .operations import ( from .operations import (
EndpointOperation, EndpointOperation,
@ -546,6 +548,84 @@ class Generator:
return extra_tags return extra_tags
def _get_api_group_for_operation(self, op) -> str | None:
"""
Determine the API group for an operation based on its route path.
Args:
op: The endpoint operation
Returns:
The API group name derived from the route, or None if unable to determine
"""
if not hasattr(op, 'webmethod') or not op.webmethod or not hasattr(op.webmethod, 'route'):
return None
route = op.webmethod.route
if not route or not route.startswith('/'):
return None
# Extract API group from route path
# Examples: /v1/agents/list -> agents-api
# /v1/responses -> responses-api
# /v1/models -> models-api
path_parts = route.strip('/').split('/')
if len(path_parts) < 2:
return None
# Skip version prefix (v1, v1alpha, v1beta, etc.)
if path_parts[0].startswith('v1'):
if len(path_parts) < 2:
return None
api_segment = path_parts[1]
else:
api_segment = path_parts[0]
# Convert to supplementary file naming convention
# agents -> agents-api, responses -> responses-api, etc.
return f"{api_segment}-api"
def _load_supplemental_content(self, api_group: str | None) -> str:
"""
Load supplemental content for an API group based on stability level.
Follows this resolution order:
1. docs/supplementary/{stability}/{api_group}.md
2. docs/supplementary/shared/{api_group}.md (fallback)
3. Empty string if no files found
Args:
api_group: The API group name (e.g., "agents-responses-api"), or None if no mapping exists
Returns:
The supplemental content as markdown string, or empty string if not found
"""
if not api_group:
return ""
base_path = Path(__file__).parent.parent.parent / "supplementary"
# Try stability-specific content first if stability filter is set
if self.options.stability_filter:
stability_path = base_path / self.options.stability_filter / f"{api_group}.md"
if stability_path.exists():
try:
return stability_path.read_text(encoding="utf-8")
except Exception as e:
print(f"Warning: Could not read stability-specific supplemental content from {stability_path}: {e}")
# Fall back to shared content
shared_path = base_path / "shared" / f"{api_group}.md"
if shared_path.exists():
try:
return shared_path.read_text(encoding="utf-8")
except Exception as e:
print(f"Warning: Could not read shared supplemental content from {shared_path}: {e}")
# No supplemental content found
return ""
def _build_operation(self, op: EndpointOperation) -> Operation: def _build_operation(self, op: EndpointOperation) -> Operation:
if op.defining_class.__name__ in [ if op.defining_class.__name__ in [
"SyntheticDataGeneration", "SyntheticDataGeneration",
@ -797,10 +877,14 @@ class Generator:
else: else:
callbacks = None callbacks = None
description = "\n".join( # Build base description from docstring
base_description = "\n".join(
filter(None, [doc_string.short_description, doc_string.long_description]) filter(None, [doc_string.short_description, doc_string.long_description])
) )
# Individual endpoints get clean descriptions only
description = base_description
return Operation( return Operation(
tags=[ tags=[
getattr(op.defining_class, "API_NAMESPACE", op.defining_class.__name__) getattr(op.defining_class, "API_NAMESPACE", op.defining_class.__name__)
@ -811,16 +895,121 @@ class Generator:
requestBody=requestBody, requestBody=requestBody,
responses=responses, responses=responses,
callbacks=callbacks, callbacks=callbacks,
deprecated=True if "DEPRECATED" in op.func_name else None, deprecated=getattr(op.webmethod, "deprecated", False)
or "DEPRECATED" in op.func_name,
security=[] if op.public else None, security=[] if op.public else None,
) )
def _get_api_stability_priority(self, api_level: str) -> int:
"""
Return sorting priority for API stability levels.
Lower numbers = higher priority (appear first)
:param api_level: The API level (e.g., "v1", "v1beta", "v1alpha")
:return: Priority number for sorting
"""
stability_order = {
"v1": 0, # Stable - highest priority
"v1beta": 1, # Beta - medium priority
"v1alpha": 2, # Alpha - lowest priority
}
return stability_order.get(api_level, 999) # Unknown levels go last
def generate(self) -> Document: def generate(self) -> Document:
paths: Dict[str, PathItem] = {} paths: Dict[str, PathItem] = {}
endpoint_classes: Set[type] = set() endpoint_classes: Set[type] = set()
for op in get_endpoint_operations(
self.endpoint, use_examples=self.options.use_examples # Collect all operations and filter by stability if specified
): operations = list(
get_endpoint_operations(
self.endpoint, use_examples=self.options.use_examples
)
)
# Filter operations by stability level if requested
if self.options.stability_filter:
filtered_operations = []
for op in operations:
deprecated = (
getattr(op.webmethod, "deprecated", False)
or "DEPRECATED" in op.func_name
)
stability_level = op.webmethod.level
if self.options.stability_filter == "stable":
# Include v1 non-deprecated endpoints
if stability_level == "v1" and not deprecated:
filtered_operations.append(op)
elif self.options.stability_filter == "experimental":
# Include v1alpha and v1beta endpoints (deprecated or not)
if stability_level in ["v1alpha", "v1beta"]:
filtered_operations.append(op)
elif self.options.stability_filter == "deprecated":
# Include only deprecated endpoints
if deprecated:
filtered_operations.append(op)
operations = filtered_operations
print(
f"Filtered to {len(operations)} operations for stability level: {self.options.stability_filter}"
)
# Sort operations by multiple criteria for consistent ordering:
# 1. Stability level with deprecation handling (global priority):
# - Active stable (v1) comes first
# - Beta (v1beta) comes next
# - Alpha (v1alpha) comes next
# - Deprecated stable (v1 deprecated) comes last
# 2. Route path (group related endpoints within same stability level)
# 3. HTTP method (GET, POST, PUT, DELETE, PATCH)
# 4. Operation name (alphabetical)
def sort_key(op):
http_method_order = {
HTTPMethod.GET: 0,
HTTPMethod.POST: 1,
HTTPMethod.PUT: 2,
HTTPMethod.DELETE: 3,
HTTPMethod.PATCH: 4,
}
# Enhanced stability priority for migration pattern support
deprecated = getattr(op.webmethod, "deprecated", False)
stability_priority = self._get_api_stability_priority(op.webmethod.level)
# Deprecated versions should appear after everything else
# This ensures deprecated stable endpoints come last globally
if deprecated:
stability_priority += 10 # Push deprecated endpoints to the end
return (
stability_priority, # Global stability handling comes first
op.get_route(
op.webmethod
), # Group by route path within stability level
http_method_order.get(op.http_method, 999),
op.func_name,
)
operations.sort(key=sort_key)
# Debug output for migration pattern tracking
migration_routes = {}
for op in operations:
route_key = (op.get_route(op.webmethod), op.http_method)
if route_key not in migration_routes:
migration_routes[route_key] = []
migration_routes[route_key].append(
(op.webmethod.level, getattr(op.webmethod, "deprecated", False))
)
for route_key, versions in migration_routes.items():
if len(versions) > 1:
print(f"Migration pattern detected for {route_key[1]} {route_key[0]}:")
for level, deprecated in versions:
status = "DEPRECATED" if deprecated else "ACTIVE"
print(f" - {level} ({status})")
for op in operations:
endpoint_classes.add(op.defining_class) endpoint_classes.add(op.defining_class)
operation = self._build_operation(op) operation = self._build_operation(op)
@ -851,10 +1040,22 @@ class Generator:
doc_string = parse_type(cls) doc_string = parse_type(cls)
if hasattr(cls, "API_NAMESPACE") and cls.API_NAMESPACE != cls.__name__: if hasattr(cls, "API_NAMESPACE") and cls.API_NAMESPACE != cls.__name__:
continue continue
# Add supplemental content to tag pages
api_group = f"{cls.__name__.lower()}-api"
supplemental_content = self._load_supplemental_content(api_group)
tag_description = doc_string.long_description or ""
if supplemental_content:
if tag_description:
tag_description = f"{tag_description}\n\n{supplemental_content}"
else:
tag_description = supplemental_content
operation_tags.append( operation_tags.append(
Tag( Tag(
name=cls.__name__, name=cls.__name__,
description=doc_string.long_description, description=tag_description,
displayName=doc_string.short_description, displayName=doc_string.short_description,
) )
) )
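Distilled, the ordering logic above sorts by stability first (v1, then v1beta, then v1alpha), pushes deprecated endpoints behind everything else, then groups by route. A self-contained model of that key (toy data, not the generator's actual types):

```python
STABILITY = {"v1": 0, "v1beta": 1, "v1alpha": 2}

def sort_key(level: str, deprecated: bool, route: str) -> tuple[int, str]:
    priority = STABILITY.get(level, 999) + (10 if deprecated else 0)
    return priority, route

ops = [
    ("v1", True, "/v1/agents"),            # deprecated stable -> last
    ("v1alpha", False, "/v1alpha/agents"),
    ("v1", False, "/v1/models"),           # active stable -> first
]
for level, deprecated, route in sorted(ops, key=lambda o: sort_key(*o)):
    print(level, "DEPRECATED" if deprecated else "ACTIVE", route)
```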

View file

@ -54,6 +54,7 @@ class Options:
property_description_fun: Optional[Callable[[type, str, str], str]] = None property_description_fun: Optional[Callable[[type, str, str], str]] = None
captions: Optional[Dict[str, str]] = None captions: Optional[Dict[str, str]] = None
include_standard_error_responses: bool = True include_standard_error_responses: bool = True
stability_filter: Optional[str] = None
default_captions: ClassVar[Dict[str, str]] = { default_captions: ClassVar[Dict[str, str]] = {
"Operations": "Operations", "Operations": "Operations",

View file

@ -335,8 +335,10 @@ const sidebars: SidebarsConfig = {
}, },
], ],
// API Reference sidebar - use plugin-generated sidebar // API Reference sidebars - use plugin-generated sidebars
apiSidebar: require('./docs/api/sidebar.ts').default, stableApiSidebar: require('./docs/api/sidebar.ts').default,
experimentalApiSidebar: require('./docs/api-experimental/sidebar.ts').default,
deprecatedApiSidebar: require('./docs/api-deprecated/sidebar.ts').default,
}; };
export default sidebars; export default sidebars;

View file

@ -189,3 +189,29 @@ button[class*="button"]:hover,
.pagination-nav__link--prev:hover { .pagination-nav__link--prev:hover {
background-color: #f3f4f6 !important; background-color: #f3f4f6 !important;
} }
/* Deprecated endpoint styling */
.menu__list-item--deprecated .menu__link {
text-decoration: line-through !important;
opacity: 0.7;
font-style: italic;
}
.menu__list-item--deprecated .menu__link:hover {
opacity: 0.9;
}
/* Deprecated endpoint badges - slightly muted */
.menu__list-item--deprecated.api-method > .menu__link::before {
opacity: 0.7;
border-style: dashed !important;
}
/* Dark theme adjustments for deprecated endpoints */
[data-theme='dark'] .menu__list-item--deprecated .menu__link {
opacity: 0.6;
}
[data-theme='dark'] .menu__list-item--deprecated .menu__link:hover {
opacity: 0.8;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,9 @@
## Deprecated APIs
> **⚠️ DEPRECATED**: These APIs are provided for migration reference and will be removed in future versions. Not recommended for new projects.
### Migration Guidance
If you are using deprecated versions of the Agents or Responses APIs, please migrate to:
- **Responses API**: Use the stable v1 Responses API endpoints

View file

@ -0,0 +1,21 @@
## Agents API (Experimental)
> **🧪 EXPERIMENTAL**: This API is in preview and may change based on user feedback. Great for exploring new capabilities and providing feedback to influence the final design.
Main functionalities provided by this API:
- Create agents with specific instructions and ability to use tools.
- Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn".
- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).
- Agents can be provided with various shields (see the Safety API for more details).
- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details.
### 🧪 Feedback Welcome
This API is actively being developed. We welcome feedback on:
- API design and usability
- Performance characteristics
- Missing features or capabilities
- Integration patterns
**Provide Feedback**: [GitHub Discussions](https://github.com/llamastack/llama-stack/discussions) or [GitHub Issues](https://github.com/llamastack/llama-stack/issues)

View file

@ -0,0 +1,40 @@
## Responses API
The Responses API provides OpenAI-compatible functionality with enhanced capabilities for dynamic, stateful interactions.
> **✅ STABLE**: This API is production-ready with backward compatibility guarantees. Recommended for production applications.
### ✅ Supported Tools
The Responses API supports the following tool types:
- **`web_search`**: Search the web for current information and real-time data
- **`file_search`**: Search through uploaded files and vector stores
- Supports dynamic `vector_store_ids` per call
- Compatible with OpenAI file search patterns
- **`function`**: Call custom functions with JSON schema validation
- **`mcp_tool`**: Model Context Protocol integration
### ✅ Supported Fields & Features
**Core Capabilities:**
- **Dynamic Configuration**: Switch models, vector stores, and tools per request without pre-configuration
- **Conversation Branching**: Use `previous_response_id` to branch conversations and explore different paths
- **Rich Annotations**: Automatic file citations, URL citations, and container file citations
- **Status Tracking**: Monitor tool call execution status and handle failures gracefully
### 🚧 Work in Progress
- Full real-time response streaming support
- `tool_choice` parameter
- `max_tool_calls` parameter
- Built-in tools (code interpreter, containers API)
- Safety & guardrails
- `reasoning` capabilities
- `service_tier`
- `logprobs`
- `max_output_tokens`
- `metadata` handling
- `instructions`
- `incomplete_details`
- `background`
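As a usage illustration of the supported fields listed above, here is a hedged sketch against a Llama Stack server's OpenAI-compatible Responses endpoint; the base URL, model id, and vector store id are placeholders, not values from this repository:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8321/v1", api_key="none")

# file_search with dynamic vector_store_ids, supplied per call
first = client.responses.create(
    model="llama3.2:3b",
    input="Summarize the uploaded design doc.",
    tools=[{"type": "file_search", "vector_store_ids": ["vs_123"]}],
)

# Branch the conversation with previous_response_id
branch = client.responses.create(
    model="llama3.2:3b",
    input="Now list only the open questions.",
    previous_response_id=first.id,
)
print(branch.output_text)
```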

View file

@ -472,20 +472,23 @@ class AgentStepResponse(BaseModel):
@runtime_checkable @runtime_checkable
class Agents(Protocol): class Agents(Protocol):
"""Agents API for creating and interacting with agentic systems. """Agents
Main functionalities provided by this API: APIs for creating and interacting with agentic systems."""
- Create agents with specific instructions and ability to use tools.
- Interactions with agents are grouped into sessions ("threads"), and each interaction is called a "turn".
- Agents can be provided with various tools (see the ToolGroups and ToolRuntime APIs for more details).
- Agents can be provided with various shields (see the Safety API for more details).
- Agents can also use Memory to retrieve information from knowledge bases. See the RAG Tool and Vector IO APIs for more details.
"""
@webmethod( @webmethod(
route="/agents", method="POST", descriptive_name="create_agent", deprecated=True, level=LLAMA_STACK_API_V1 route="/agents",
method="POST",
descriptive_name="create_agent",
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(
route="/agents",
method="POST",
descriptive_name="create_agent",
level=LLAMA_STACK_API_V1ALPHA,
) )
@webmethod(route="/agents", method="POST", descriptive_name="create_agent", level=LLAMA_STACK_API_V1ALPHA)
async def create_agent( async def create_agent(
self, self,
agent_config: AgentConfig, agent_config: AgentConfig,
@ -648,8 +651,17 @@ class Agents(Protocol):
""" """
... ...
@webmethod(route="/agents/{agent_id}/session/{session_id}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) @webmethod(
@webmethod(route="/agents/{agent_id}/session/{session_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA) route="/agents/{agent_id}/session/{session_id}",
method="GET",
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(
route="/agents/{agent_id}/session/{session_id}",
method="GET",
level=LLAMA_STACK_API_V1ALPHA,
)
async def get_agents_session( async def get_agents_session(
self, self,
session_id: str, session_id: str,
@ -666,9 +678,16 @@ class Agents(Protocol):
... ...
@webmethod( @webmethod(
route="/agents/{agent_id}/session/{session_id}", method="DELETE", deprecated=True, level=LLAMA_STACK_API_V1 route="/agents/{agent_id}/session/{session_id}",
method="DELETE",
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(
route="/agents/{agent_id}/session/{session_id}",
method="DELETE",
level=LLAMA_STACK_API_V1ALPHA,
) )
@webmethod(route="/agents/{agent_id}/session/{session_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA)
async def delete_agents_session( async def delete_agents_session(
self, self,
session_id: str, session_id: str,
@ -681,7 +700,12 @@ class Agents(Protocol):
""" """
... ...
@webmethod(route="/agents/{agent_id}", method="DELETE", deprecated=True, level=LLAMA_STACK_API_V1) @webmethod(
route="/agents/{agent_id}",
method="DELETE",
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(route="/agents/{agent_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA) @webmethod(route="/agents/{agent_id}", method="DELETE", level=LLAMA_STACK_API_V1ALPHA)
async def delete_agent( async def delete_agent(
self, self,
@ -704,7 +728,12 @@ class Agents(Protocol):
""" """
... ...
@webmethod(route="/agents/{agent_id}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) @webmethod(
route="/agents/{agent_id}",
method="GET",
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(route="/agents/{agent_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA) @webmethod(route="/agents/{agent_id}", method="GET", level=LLAMA_STACK_API_V1ALPHA)
async def get_agent(self, agent_id: str) -> Agent: async def get_agent(self, agent_id: str) -> Agent:
"""Describe an agent by its ID. """Describe an agent by its ID.
@ -714,7 +743,12 @@ class Agents(Protocol):
""" """
... ...
@webmethod(route="/agents/{agent_id}/sessions", method="GET", deprecated=True, level=LLAMA_STACK_API_V1) @webmethod(
route="/agents/{agent_id}/sessions",
method="GET",
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(route="/agents/{agent_id}/sessions", method="GET", level=LLAMA_STACK_API_V1ALPHA) @webmethod(route="/agents/{agent_id}/sessions", method="GET", level=LLAMA_STACK_API_V1ALPHA)
async def list_agent_sessions( async def list_agent_sessions(
self, self,
@ -793,7 +827,11 @@ class Agents(Protocol):
""" """
... ...
@webmethod(route="/responses/{response_id}/input_items", method="GET", level=LLAMA_STACK_API_V1) @webmethod(
route="/responses/{response_id}/input_items",
method="GET",
level=LLAMA_STACK_API_V1,
)
async def list_openai_response_input_items( async def list_openai_response_input_items(
self, self,
response_id: str, response_id: str,

View file

@ -8,7 +8,7 @@ from typing import Any, Protocol, runtime_checkable
from llama_stack.apis.common.responses import PaginatedResponse from llama_stack.apis.common.responses import PaginatedResponse
from llama_stack.apis.datasets import Dataset from llama_stack.apis.datasets import Dataset
from llama_stack.apis.version import LLAMA_STACK_API_V1 from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1BETA
from llama_stack.schema_utils import webmethod from llama_stack.schema_utils import webmethod
@ -21,7 +21,8 @@ class DatasetIO(Protocol):
# keeping for aligning with inference/safety, but this is not used # keeping for aligning with inference/safety, but this is not used
dataset_store: DatasetStore dataset_store: DatasetStore
@webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1) @webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1)
@webmethod(route="/datasetio/iterrows/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1BETA)
async def iterrows( async def iterrows(
self, self,
dataset_id: str, dataset_id: str,
@ -45,7 +46,10 @@ class DatasetIO(Protocol):
""" """
... ...
@webmethod(route="/datasetio/append-rows/{dataset_id:path}", method="POST", level=LLAMA_STACK_API_V1) @webmethod(
route="/datasetio/append-rows/{dataset_id:path}", method="POST", deprecated=True, level=LLAMA_STACK_API_V1
)
@webmethod(route="/datasetio/append-rows/{dataset_id:path}", method="POST", level=LLAMA_STACK_API_V1BETA)
async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None: async def append_rows(self, dataset_id: str, rows: list[dict[str, Any]]) -> None:
"""Append rows to a dataset. """Append rows to a dataset.

View file

@ -10,7 +10,7 @@ from typing import Annotated, Any, Literal, Protocol
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from llama_stack.apis.resource import Resource, ResourceType from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.apis.version import LLAMA_STACK_API_V1 from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1BETA
from llama_stack.schema_utils import json_schema_type, register_schema, webmethod from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
@ -146,7 +146,8 @@ class ListDatasetsResponse(BaseModel):
class Datasets(Protocol): class Datasets(Protocol):
@webmethod(route="/datasets", method="POST", level=LLAMA_STACK_API_V1) @webmethod(route="/datasets", method="POST", deprecated=True, level=LLAMA_STACK_API_V1)
@webmethod(route="/datasets", method="POST", level=LLAMA_STACK_API_V1BETA)
async def register_dataset( async def register_dataset(
self, self,
purpose: DatasetPurpose, purpose: DatasetPurpose,
@ -215,7 +216,8 @@ class Datasets(Protocol):
""" """
... ...
@webmethod(route="/datasets/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1) @webmethod(route="/datasets/{dataset_id:path}", method="GET", deprecated=True, level=LLAMA_STACK_API_V1)
@webmethod(route="/datasets/{dataset_id:path}", method="GET", level=LLAMA_STACK_API_V1BETA)
async def get_dataset( async def get_dataset(
self, self,
dataset_id: str, dataset_id: str,
@ -227,7 +229,8 @@ class Datasets(Protocol):
""" """
... ...
@webmethod(route="/datasets", method="GET", level=LLAMA_STACK_API_V1) @webmethod(route="/datasets", method="GET", deprecated=True, level=LLAMA_STACK_API_V1)
@webmethod(route="/datasets", method="GET", level=LLAMA_STACK_API_V1BETA)
async def list_datasets(self) -> ListDatasetsResponse: async def list_datasets(self) -> ListDatasetsResponse:
"""List all datasets. """List all datasets.
@ -235,7 +238,8 @@ class Datasets(Protocol):
""" """
... ...
@webmethod(route="/datasets/{dataset_id:path}", method="DELETE", level=LLAMA_STACK_API_V1) @webmethod(route="/datasets/{dataset_id:path}", method="DELETE", deprecated=True, level=LLAMA_STACK_API_V1)
@webmethod(route="/datasets/{dataset_id:path}", method="DELETE", level=LLAMA_STACK_API_V1BETA)
async def unregister_dataset( async def unregister_dataset(
self, self,
dataset_id: str, dataset_id: str,

View file

@ -1008,28 +1008,6 @@ class InferenceProvider(Protocol):
model_store: ModelStore | None = None model_store: ModelStore | None = None
async def completion(
self,
model_id: str,
content: InterleavedContent,
sampling_params: SamplingParams | None = None,
response_format: ResponseFormat | None = None,
stream: bool | None = False,
logprobs: LogProbConfig | None = None,
) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]:
"""Generate a completion for the given content using the specified model.
:param model_id: The identifier of the model to use. The model must be registered with Llama Stack and available via the /models endpoint.
:param content: The content to generate a completion for.
:param sampling_params: (Optional) Parameters to control the sampling strategy.
:param response_format: (Optional) Grammar specification for guided (structured) decoding.
:param stream: (Optional) If True, generate an SSE event stream of the response. Defaults to False.
:param logprobs: (Optional) If specified, log probabilities for each token position will be returned.
:returns: If stream=False, returns a CompletionResponse with the full completion.
If stream=True, returns an SSE event stream of CompletionResponseStreamChunk.
"""
...
async def chat_completion( async def chat_completion(
self, self,
model_id: str, model_id: str,

View file

@ -16,7 +16,7 @@ from typing import (
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from llama_stack.apis.version import LLAMA_STACK_API_V1 from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
from llama_stack.models.llama.datatypes import Primitive from llama_stack.models.llama.datatypes import Primitive
from llama_stack.schema_utils import json_schema_type, register_schema, webmethod from llama_stack.schema_utils import json_schema_type, register_schema, webmethod
@ -426,7 +426,14 @@ class Telemetry(Protocol):
""" """
... ...
@webmethod(route="/telemetry/traces", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1) @webmethod(
route="/telemetry/traces",
method="POST",
required_scope=REQUIRED_SCOPE,
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(route="/telemetry/traces", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1ALPHA)
async def query_traces( async def query_traces(
self, self,
attribute_filters: list[QueryCondition] | None = None, attribute_filters: list[QueryCondition] | None = None,
@ -445,7 +452,17 @@ class Telemetry(Protocol):
... ...
@webmethod( @webmethod(
route="/telemetry/traces/{trace_id:path}", method="GET", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1 route="/telemetry/traces/{trace_id:path}",
method="GET",
required_scope=REQUIRED_SCOPE,
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(
route="/telemetry/traces/{trace_id:path}",
method="GET",
required_scope=REQUIRED_SCOPE,
level=LLAMA_STACK_API_V1ALPHA,
) )
async def get_trace(self, trace_id: str) -> Trace: async def get_trace(self, trace_id: str) -> Trace:
"""Get a trace by its ID. """Get a trace by its ID.
@ -459,8 +476,15 @@ class Telemetry(Protocol):
route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}", route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}",
method="GET", method="GET",
required_scope=REQUIRED_SCOPE, required_scope=REQUIRED_SCOPE,
deprecated=True,
level=LLAMA_STACK_API_V1, level=LLAMA_STACK_API_V1,
) )
@webmethod(
route="/telemetry/traces/{trace_id:path}/spans/{span_id:path}",
method="GET",
required_scope=REQUIRED_SCOPE,
level=LLAMA_STACK_API_V1ALPHA,
)
async def get_span(self, trace_id: str, span_id: str) -> Span: async def get_span(self, trace_id: str, span_id: str) -> Span:
"""Get a span by its ID. """Get a span by its ID.
@ -473,9 +497,16 @@ class Telemetry(Protocol):
@webmethod( @webmethod(
route="/telemetry/spans/{span_id:path}/tree", route="/telemetry/spans/{span_id:path}/tree",
method="POST", method="POST",
deprecated=True,
required_scope=REQUIRED_SCOPE, required_scope=REQUIRED_SCOPE,
level=LLAMA_STACK_API_V1, level=LLAMA_STACK_API_V1,
) )
@webmethod(
route="/telemetry/spans/{span_id:path}/tree",
method="POST",
required_scope=REQUIRED_SCOPE,
level=LLAMA_STACK_API_V1ALPHA,
)
async def get_span_tree( async def get_span_tree(
self, self,
span_id: str, span_id: str,
@ -491,7 +522,14 @@ class Telemetry(Protocol):
""" """
... ...
@webmethod(route="/telemetry/spans", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1) @webmethod(
route="/telemetry/spans",
method="POST",
required_scope=REQUIRED_SCOPE,
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(route="/telemetry/spans", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1ALPHA)
async def query_spans( async def query_spans(
self, self,
attribute_filters: list[QueryCondition], attribute_filters: list[QueryCondition],
@ -507,7 +545,8 @@ class Telemetry(Protocol):
""" """
... ...
@webmethod(route="/telemetry/spans/export", method="POST", level=LLAMA_STACK_API_V1) @webmethod(route="/telemetry/spans/export", method="POST", deprecated=True, level=LLAMA_STACK_API_V1)
@webmethod(route="/telemetry/spans/export", method="POST", level=LLAMA_STACK_API_V1ALPHA)
async def save_spans_to_dataset( async def save_spans_to_dataset(
self, self,
attribute_filters: list[QueryCondition], attribute_filters: list[QueryCondition],
@ -525,7 +564,17 @@ class Telemetry(Protocol):
... ...
@webmethod( @webmethod(
route="/telemetry/metrics/{metric_name}", method="POST", required_scope=REQUIRED_SCOPE, level=LLAMA_STACK_API_V1 route="/telemetry/metrics/{metric_name}",
method="POST",
required_scope=REQUIRED_SCOPE,
deprecated=True,
level=LLAMA_STACK_API_V1,
)
@webmethod(
route="/telemetry/metrics/{metric_name}",
method="POST",
required_scope=REQUIRED_SCOPE,
level=LLAMA_STACK_API_V1ALPHA,
) )
async def query_metrics( async def query_metrics(
self, self,

View file

@ -267,47 +267,6 @@ class InferenceRouter(Inference):
) )
return response return response
async def completion(
self,
model_id: str,
content: InterleavedContent,
sampling_params: SamplingParams | None = None,
response_format: ResponseFormat | None = None,
stream: bool | None = False,
logprobs: LogProbConfig | None = None,
) -> AsyncGenerator:
if sampling_params is None:
sampling_params = SamplingParams()
logger.debug(
f"InferenceRouter.completion: {model_id=}, {stream=}, {content=}, {sampling_params=}, {response_format=}",
)
model = await self._get_model(model_id, ModelType.llm)
provider = await self.routing_table.get_provider_impl(model_id)
params = dict(
model_id=model_id,
content=content,
sampling_params=sampling_params,
response_format=response_format,
stream=stream,
logprobs=logprobs,
)
prompt_tokens = await self._count_tokens(content)
response = await provider.completion(**params)
if stream:
return self.stream_tokens_and_compute_metrics(
response=response,
prompt_tokens=prompt_tokens,
model=model,
)
metrics = await self.count_tokens_and_compute_metrics(
response=response, prompt_tokens=prompt_tokens, model=model
)
response.metrics = metrics if response.metrics is None else response.metrics + metrics
return response
async def openai_completion( async def openai_completion(
self, self,
model: str, model: str,

View file

@ -247,7 +247,16 @@ def get_logger(
_category_levels.update(parse_yaml_config(config)) _category_levels.update(parse_yaml_config(config))
logger = logging.getLogger(name) logger = logging.getLogger(name)
logger.setLevel(_category_levels.get(category, DEFAULT_LOG_LEVEL)) if category in _category_levels:
log_level = _category_levels[category]
else:
root_category = category.split("::")[0]
if root_category in _category_levels:
log_level = _category_levels[root_category]
else:
log_level = _category_levels.get("root", DEFAULT_LOG_LEVEL)
logging.warning(f"Unknown logging category: {category}. Falling back to default 'root' level: {log_level}")
logger.setLevel(log_level)
return logging.LoggerAdapter(logger, {"category": category}) return logging.LoggerAdapter(logger, {"category": category})
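The fallback chain above resolves a level for `inference::vllm`-style categories: exact category first, then the root before `::`, then the `root` default. A compact model of the resolution (illustrative level values):

```python
_category_levels = {"inference": "DEBUG", "root": "INFO"}

def resolve_level(category: str) -> str:
    if category in _category_levels:
        return _category_levels[category]
    root_category = category.split("::")[0]
    if root_category in _category_levels:
        return _category_levels[root_category]
    return _category_levels.get("root", "INFO")

print(resolve_level("inference::vllm"))  # DEBUG, via root category "inference"
print(resolve_level("telemetry"))        # INFO, via the "root" default
```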

View file

@ -355,8 +355,11 @@ class StreamingResponseOrchestrator:
# Emit arguments.done events for completed tool calls (differentiate between MCP and function calls) # Emit arguments.done events for completed tool calls (differentiate between MCP and function calls)
for tool_call_index in sorted(chat_response_tool_calls.keys()): for tool_call_index in sorted(chat_response_tool_calls.keys()):
tool_call = chat_response_tool_calls[tool_call_index]
# Ensure that arguments, if sent back to the inference provider, are not None
tool_call.function.arguments = tool_call.function.arguments or "{}"
tool_call_item_id = tool_call_item_ids[tool_call_index] tool_call_item_id = tool_call_item_ids[tool_call_index]
final_arguments = chat_response_tool_calls[tool_call_index].function.arguments or "" final_arguments = tool_call.function.arguments
tool_call_name = chat_response_tool_calls[tool_call_index].function.name tool_call_name = chat_response_tool_calls[tool_call_index].function.name
# Check if this is an MCP tool call # Check if this is an MCP tool call

View file

@ -24,11 +24,7 @@ from llama_stack.apis.inference import (
ChatCompletionResponseEventType, ChatCompletionResponseEventType,
ChatCompletionResponseStreamChunk, ChatCompletionResponseStreamChunk,
CompletionMessage, CompletionMessage,
CompletionRequest,
CompletionResponse,
CompletionResponseStreamChunk,
InferenceProvider, InferenceProvider,
InterleavedContent,
LogProbConfig, LogProbConfig,
Message, Message,
ResponseFormat, ResponseFormat,
@ -59,10 +55,8 @@ from llama_stack.providers.utils.inference.model_registry import (
) )
from llama_stack.providers.utils.inference.openai_compat import ( from llama_stack.providers.utils.inference.openai_compat import (
OpenAIChatCompletionToLlamaStackMixin, OpenAIChatCompletionToLlamaStackMixin,
OpenAICompletionToLlamaStackMixin,
) )
from llama_stack.providers.utils.inference.prompt_adapter import ( from llama_stack.providers.utils.inference.prompt_adapter import (
augment_content_with_response_format_prompt,
chat_completion_request_to_messages, chat_completion_request_to_messages,
convert_request_to_raw, convert_request_to_raw,
) )
@ -82,7 +76,6 @@ def llama_builder_fn(config: MetaReferenceInferenceConfig, model_id: str, llama_
class MetaReferenceInferenceImpl( class MetaReferenceInferenceImpl(
OpenAICompletionToLlamaStackMixin,
OpenAIChatCompletionToLlamaStackMixin, OpenAIChatCompletionToLlamaStackMixin,
SentenceTransformerEmbeddingMixin, SentenceTransformerEmbeddingMixin,
InferenceProvider, InferenceProvider,
@ -100,6 +93,9 @@ class MetaReferenceInferenceImpl(
if self.config.create_distributed_process_group: if self.config.create_distributed_process_group:
self.generator.stop() self.generator.stop()
async def openai_completion(self, *args, **kwargs):
raise NotImplementedError("OpenAI completion not supported by meta reference provider")
async def should_refresh_models(self) -> bool: async def should_refresh_models(self) -> bool:
return False return False
@ -165,11 +161,6 @@ class MetaReferenceInferenceImpl(
self.llama_model = llama_model self.llama_model = llama_model
log.info("Warming up...") log.info("Warming up...")
await self.completion(
model_id=model_id,
content="Hello, world!",
sampling_params=SamplingParams(max_tokens=10),
)
await self.chat_completion( await self.chat_completion(
model_id=model_id, model_id=model_id,
messages=[UserMessage(content="Hi how are you?")], messages=[UserMessage(content="Hi how are you?")],
@ -185,137 +176,6 @@ class MetaReferenceInferenceImpl(
elif request.model != self.model_id: elif request.model != self.model_id:
raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}") raise RuntimeError(f"Model mismatch: request model: {request.model} != loaded model: {self.model_id}")
async def completion(
self,
model_id: str,
content: InterleavedContent,
sampling_params: SamplingParams | None = None,
response_format: ResponseFormat | None = None,
stream: bool | None = False,
logprobs: LogProbConfig | None = None,
) -> CompletionResponse | CompletionResponseStreamChunk:
if sampling_params is None:
sampling_params = SamplingParams()
if logprobs:
assert logprobs.top_k == 1, f"Unexpected top_k={logprobs.top_k}"
content = augment_content_with_response_format_prompt(response_format, content)
request = CompletionRequest(
model=model_id,
content=content,
sampling_params=sampling_params,
response_format=response_format,
stream=stream,
logprobs=logprobs,
)
self.check_model(request)
request = await convert_request_to_raw(request)
if request.stream:
return self._stream_completion(request)
else:
results = await self._nonstream_completion([request])
return results[0]
async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
tokenizer = self.generator.formatter.tokenizer
def impl():
stop_reason = None
for token_results in self.generator.completion([request]):
token_result = token_results[0]
if token_result.token == tokenizer.eot_id:
stop_reason = StopReason.end_of_turn
text = ""
elif token_result.token == tokenizer.eom_id:
stop_reason = StopReason.end_of_message
text = ""
else:
text = token_result.text
logprobs = None
if stop_reason is None:
if request.logprobs:
assert len(token_result.logprobs) == 1
logprobs = [TokenLogProbs(logprobs_by_token={token_result.text: token_result.logprobs[0]})]
yield CompletionResponseStreamChunk(
delta=text,
stop_reason=stop_reason,
logprobs=logprobs if request.logprobs else None,
)
if stop_reason is None:
yield CompletionResponseStreamChunk(
delta="",
stop_reason=StopReason.out_of_tokens,
)
if self.config.create_distributed_process_group:
async with SEMAPHORE:
for x in impl():
yield x
else:
for x in impl():
yield x
async def _nonstream_completion(self, request_batch: list[CompletionRequest]) -> list[CompletionResponse]:
tokenizer = self.generator.formatter.tokenizer
first_request = request_batch[0]
class ItemState(BaseModel):
tokens: list[int] = []
logprobs: list[TokenLogProbs] = []
stop_reason: StopReason | None = None
finished: bool = False
def impl():
states = [ItemState() for _ in request_batch]
results = []
for token_results in self.generator.completion(request_batch):
for result in token_results:
idx = result.batch_idx
state = states[idx]
if state.finished or result.ignore_token:
continue
state.finished = result.finished
if first_request.logprobs:
state.logprobs.append(TokenLogProbs(logprobs_by_token={result.text: result.logprobs[0]}))
state.tokens.append(result.token)
if result.token == tokenizer.eot_id:
state.stop_reason = StopReason.end_of_turn
elif result.token == tokenizer.eom_id:
state.stop_reason = StopReason.end_of_message
for state in states:
if state.stop_reason is None:
state.stop_reason = StopReason.out_of_tokens
if state.tokens[-1] in self.generator.formatter.tokenizer.stop_tokens:
state.tokens = state.tokens[:-1]
content = self.generator.formatter.tokenizer.decode(state.tokens)
results.append(
CompletionResponse(
content=content,
stop_reason=state.stop_reason,
logprobs=state.logprobs if first_request.logprobs else None,
)
)
return results
if self.config.create_distributed_process_group:
async with SEMAPHORE:
return impl()
else:
return impl()
async def chat_completion( async def chat_completion(
self, self,
model_id: str, model_id: str,

View file

@ -27,8 +27,6 @@ class ModelRunner:
def __call__(self, task: Any): def __call__(self, task: Any):
if task[0] == "chat_completion": if task[0] == "chat_completion":
return self.llama.chat_completion(task[1]) return self.llama.chat_completion(task[1])
elif task[0] == "completion":
return self.llama.completion(task[1])
else: else:
raise ValueError(f"Unexpected task type {task[0]}") raise ValueError(f"Unexpected task type {task[0]}")

View file

@@ -5,9 +5,9 @@
 # the root directory of this source tree.
 
 from collections.abc import AsyncGenerator
+from typing import Any
 
 from llama_stack.apis.inference import (
-    CompletionResponse,
     InferenceProvider,
     LogProbConfig,
     Message,
@@ -18,6 +18,7 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
+from llama_stack.apis.inference.inference import OpenAICompletion
 from llama_stack.apis.models import ModelType
 from llama_stack.log import get_logger
 from llama_stack.providers.datatypes import Model, ModelsProtocolPrivate
@@ -26,7 +27,6 @@ from llama_stack.providers.utils.inference.embedding_mixin import (
 )
 from llama_stack.providers.utils.inference.openai_compat import (
     OpenAIChatCompletionToLlamaStackMixin,
-    OpenAICompletionToLlamaStackMixin,
 )
 
 from .config import SentenceTransformersInferenceConfig
@@ -36,7 +36,6 @@ log = get_logger(name=__name__, category="inference")
 
 class SentenceTransformersInferenceImpl(
     OpenAIChatCompletionToLlamaStackMixin,
-    OpenAICompletionToLlamaStackMixin,
     SentenceTransformerEmbeddingMixin,
     InferenceProvider,
     ModelsProtocolPrivate,
@@ -74,17 +73,6 @@ class SentenceTransformersInferenceImpl(
     async def unregister_model(self, model_id: str) -> None:
         pass
 
-    async def completion(
-        self,
-        model_id: str,
-        content: str,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> CompletionResponse | AsyncGenerator:
-        raise ValueError("Sentence transformers don't support completion")
-
     async def chat_completion(
         self,
         model_id: str,
@@ -99,3 +87,31 @@ class SentenceTransformersInferenceImpl(
         tool_config: ToolConfig | None = None,
     ) -> AsyncGenerator:
         raise ValueError("Sentence transformers don't support chat completion")
+
+    async def openai_completion(
+        self,
+        # Standard OpenAI completion parameters
+        model: str,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        # vLLM-specific parameters
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
+        # for fill-in-the-middle type completion
+        suffix: str | None = None,
+    ) -> OpenAICompletion:
+        raise NotImplementedError("OpenAI completion not supported by sentence transformers provider")

View file

@@ -6,7 +6,7 @@
 import re
 from typing import Any
 
-from llama_stack.apis.inference import Inference, UserMessage
+from llama_stack.apis.inference import Inference
 from llama_stack.apis.scoring import ScoringResultRow
 from llama_stack.apis.scoring_functions import ScoringFnParams
 from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
@@ -55,15 +55,16 @@ class LlmAsJudgeScoringFn(RegisteredBaseScoringFn):
             generated_answer=generated_answer,
         )
 
-        judge_response = await self.inference_api.chat_completion(
-            model_id=fn_def.params.judge_model,
+        judge_response = await self.inference_api.openai_chat_completion(
+            model=fn_def.params.judge_model,
             messages=[
-                UserMessage(
-                    content=judge_input_msg,
-                ),
+                {
+                    "role": "user",
+                    "content": judge_input_msg,
+                }
             ],
         )
-        content = judge_response.completion_message.content
+        content = judge_response.choices[0].message.content
         rating_regexes = fn_def.params.judge_score_regexes
 
         judge_rating = None
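
For reference, this migration changes two things at once: the message shape and the response accessor. A condensed before/after sketch of the two calling conventions, grounded in the hunk above (the judge model id is illustrative):

# Old (removed): typed message objects and the completion_message accessor.
response = await inference_api.chat_completion(
    model_id="judge-model",
    messages=[UserMessage(content=judge_input_msg)],
)
content = response.completion_message.content

# New: plain OpenAI-style dict messages and the choices accessor.
response = await inference_api.openai_chat_completion(
    model="judge-model",
    messages=[{"role": "user", "content": judge_input_msg}],
)
content = response.choices[0].message.content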

View file

@@ -6,12 +6,10 @@
 
 import json
 from collections.abc import AsyncGenerator, AsyncIterator
+from typing import Any
 
 from botocore.client import BaseClient
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -27,6 +25,7 @@ from llama_stack.apis.inference import (
     ToolDefinition,
     ToolPromptFormat,
 )
+from llama_stack.apis.inference.inference import OpenAICompletion
 from llama_stack.providers.remote.inference.bedrock.config import BedrockConfig
 from llama_stack.providers.utils.bedrock.client import create_bedrock_client
 from llama_stack.providers.utils.inference.model_registry import (
@@ -36,7 +35,6 @@ from llama_stack.providers.utils.inference.openai_compat import (
     OpenAIChatCompletionToLlamaStackMixin,
     OpenAICompatCompletionChoice,
     OpenAICompatCompletionResponse,
-    OpenAICompletionToLlamaStackMixin,
     get_sampling_strategy_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
@@ -89,7 +87,6 @@ class BedrockInferenceAdapter(
     ModelRegistryHelper,
     Inference,
     OpenAIChatCompletionToLlamaStackMixin,
-    OpenAICompletionToLlamaStackMixin,
 ):
     def __init__(self, config: BedrockConfig) -> None:
         ModelRegistryHelper.__init__(self, model_entries=MODEL_ENTRIES)
@@ -109,17 +106,6 @@ class BedrockInferenceAdapter(
         if self._client is not None:
             self._client.close()
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        raise NotImplementedError()
-
     async def chat_completion(
         self,
         model_id: str,
@@ -221,3 +207,31 @@ class BedrockInferenceAdapter(
         user: str | None = None,
     ) -> OpenAIEmbeddingsResponse:
         raise NotImplementedError()
+
+    async def openai_completion(
+        self,
+        # Standard OpenAI completion parameters
+        model: str,
+        prompt: str | list[str] | list[int] | list[list[int]],
+        best_of: int | None = None,
+        echo: bool | None = None,
+        frequency_penalty: float | None = None,
+        logit_bias: dict[str, float] | None = None,
+        logprobs: bool | None = None,
+        max_tokens: int | None = None,
+        n: int | None = None,
+        presence_penalty: float | None = None,
+        seed: int | None = None,
+        stop: str | list[str] | None = None,
+        stream: bool | None = None,
+        stream_options: dict[str, Any] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        user: str | None = None,
+        # vLLM-specific parameters
+        guided_choice: list[str] | None = None,
+        prompt_logprobs: int | None = None,
+        # for fill-in-the-middle type completion
+        suffix: str | None = None,
+    ) -> OpenAICompletion:
+        raise NotImplementedError("OpenAI completion not supported by the Bedrock provider")

View file

@@ -9,9 +9,6 @@ from urllib.parse import urljoin
 
 from cerebras.cloud.sdk import AsyncCerebras
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     CompletionRequest,
@@ -35,8 +32,6 @@ from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
@@ -73,48 +68,6 @@ class CerebrasInferenceAdapter(
     async def shutdown(self) -> None:
         pass
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self.model_store.get_model(model_id)
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(
-                request,
-            )
-        else:
-            return await self._nonstream_completion(request)
-
-    async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse:
-        params = await self._get_params(request)
-
-        r = await self._cerebras_client.completions.create(**params)
-
-        return process_completion_response(r)
-
-    async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
-        params = await self._get_params(request)
-
-        stream = await self._cerebras_client.completions.create(**params)
-
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
     async def chat_completion(
         self,
         model_id: str,

View file

@@ -9,14 +9,9 @@ from typing import Any
 
 from databricks.sdk import WorkspaceClient
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
-    CompletionResponse,
-    CompletionResponseStreamChunk,
     Inference,
     LogProbConfig,
     Message,
@@ -63,17 +58,6 @@ class DatabricksInferenceAdapter(
     async def shutdown(self) -> None:
         pass
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]:
-        raise NotImplementedError()
-
     async def openai_completion(
         self,
         model: str,

View file

@@ -8,14 +8,9 @@ from collections.abc import AsyncGenerator
 
 from fireworks.client import Fireworks
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
-    CompletionRequest,
-    CompletionResponse,
     Inference,
     LogProbConfig,
     Message,
@@ -37,13 +32,10 @@ from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
-    completion_request_to_prompt,
     request_has_media,
 )
@@ -94,79 +86,6 @@ class FireworksInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Nee
             return prompt[len("<|begin_of_text|>") :]
         return prompt
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self.model_store.get_model(model_id)
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(request)
-        else:
-            return await self._nonstream_completion(request)
-
-    async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse:
-        params = await self._get_params(request)
-        r = await self._get_client().completion.acreate(**params)
-        return process_completion_response(r)
-
-    async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
-        params = await self._get_params(request)
-
-        # Wrapper for async generator similar
-        async def _to_async_generator():
-            stream = self._get_client().completion.create(**params)
-            for chunk in stream:
-                yield chunk
-
-        stream = _to_async_generator()
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
-    def _build_options(
-        self,
-        sampling_params: SamplingParams | None,
-        fmt: ResponseFormat,
-        logprobs: LogProbConfig | None,
-    ) -> dict:
-        options = get_sampling_options(sampling_params)
-        options.setdefault("max_tokens", 512)
-        if fmt:
-            if fmt.type == ResponseFormatType.json_schema.value:
-                options["response_format"] = {
-                    "type": "json_object",
-                    "schema": fmt.json_schema,
-                }
-            elif fmt.type == ResponseFormatType.grammar.value:
-                options["response_format"] = {
-                    "type": "grammar",
-                    "grammar": fmt.bnf,
-                }
-            else:
-                raise ValueError(f"Unknown response format {fmt.type}")
-
-        if logprobs and logprobs.top_k:
-            options["logprobs"] = logprobs.top_k
-            if options["logprobs"] <= 0 or options["logprobs"] >= 5:
-                raise ValueError("Required range: 0 < top_k < 5")
-
-        return options
-
     async def chat_completion(
         self,
         model_id: str,
@@ -222,22 +141,46 @@ class FireworksInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Nee
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
+    def _build_options(
+        self,
+        sampling_params: SamplingParams | None,
+        fmt: ResponseFormat | None,
+        logprobs: LogProbConfig | None,
+    ) -> dict:
+        options = get_sampling_options(sampling_params)
+        options.setdefault("max_tokens", 512)
+        if fmt:
+            if fmt.type == ResponseFormatType.json_schema.value:
+                options["response_format"] = {
+                    "type": "json_object",
+                    "schema": fmt.json_schema,
+                }
+            elif fmt.type == ResponseFormatType.grammar.value:
+                options["response_format"] = {
+                    "type": "grammar",
+                    "grammar": fmt.bnf,
+                }
+            else:
+                raise ValueError(f"Unknown response format {fmt.type}")
+
+        if logprobs and logprobs.top_k:
+            options["logprobs"] = logprobs.top_k
+            if options["logprobs"] <= 0 or options["logprobs"] >= 5:
+                raise ValueError("Required range: 0 < top_k < 5")
+
+        return options
+
+    async def _get_params(self, request: ChatCompletionRequest) -> dict:
         input_dict = {}
         media_present = request_has_media(request)
         llama_model = self.get_llama_model(request.model)
-        if isinstance(request, ChatCompletionRequest):
-            # TODO: tools are never added to the request, so we need to add them here
-            if media_present or not llama_model:
-                input_dict["messages"] = [
-                    await convert_message_to_openai_dict(m, download=True) for m in request.messages
-                ]
-            else:
-                input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model)
+        # TODO: tools are never added to the request, so we need to add them here
+        if media_present or not llama_model:
+            input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages]
         else:
-            assert not media_present, "Fireworks does not support media for Completion requests"
-            input_dict["prompt"] = await completion_request_to_prompt(request)
+            input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model)
 
         # Fireworks always prepends with BOS
         if "prompt" in input_dict:

View file

@@ -9,16 +9,10 @@ from collections.abc import AsyncIterator
 
 from openai import NOT_GIVEN, APIConnectionError
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
-    CompletionRequest,
-    CompletionResponse,
-    CompletionResponseStreamChunk,
     Inference,
     LogProbConfig,
     Message,
@@ -37,14 +31,10 @@ from llama_stack.providers.utils.inference.openai_compat import (
     convert_openai_chat_completion_stream,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-from llama_stack.providers.utils.inference.prompt_adapter import content_has_media
 
 from . import NVIDIAConfig
 from .openai_utils import (
     convert_chat_completion_request,
-    convert_completion_request,
-    convert_openai_completion_choice,
-    convert_openai_completion_stream,
 )
 from .utils import _is_nvidia_hosted
@@ -109,48 +99,6 @@ class NVIDIAInferenceAdapter(OpenAIMixin, Inference):
         """
         return f"{self._config.url}/v1" if self._config.append_api_version else self._config.url
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> CompletionResponse | AsyncIterator[CompletionResponseStreamChunk]:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        if content_has_media(content):
-            raise NotImplementedError("Media is not supported")
-
-        # ToDo: check health of NeMo endpoints and enable this
-        # removing this health check as NeMo customizer endpoint health check is returning 404
-        # await check_health(self._config)  # this raises errors
-
-        provider_model_id = await self._get_provider_model_id(model_id)
-        request = convert_completion_request(
-            request=CompletionRequest(
-                model=provider_model_id,
-                content=content,
-                sampling_params=sampling_params,
-                response_format=response_format,
-                stream=stream,
-                logprobs=logprobs,
-            ),
-            n=1,
-        )
-
-        try:
-            response = await self.client.completions.create(**request)
-        except APIConnectionError as e:
-            raise ConnectionError(f"Failed to connect to NVIDIA NIM at {self._config.url}: {e}") from e
-
-        if stream:
-            return convert_openai_completion_stream(response)
-        else:
-            # we pass n=1 to get only one completion
-            return convert_openai_completion_choice(response.choices[0])
-
     async def openai_embeddings(
         self,
         model: str,

View file

@@ -13,7 +13,6 @@ from ollama import AsyncClient as AsyncOllamaClient
 
 from llama_stack.apis.common.content_types import (
     ImageContentItem,
-    InterleavedContent,
     TextContentItem,
 )
 from llama_stack.apis.common.errors import UnsupportedModelError
@@ -21,9 +20,6 @@ from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
-    CompletionRequest,
-    CompletionResponse,
-    CompletionResponseStreamChunk,
     GrammarResponseFormat,
     InferenceProvider,
     JsonSchemaResponseFormat,
@@ -55,13 +51,10 @@ from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
-    completion_request_to_prompt,
     convert_image_content_to_url,
     request_has_media,
 )
@@ -168,67 +161,6 @@ class OllamaInferenceAdapter(
             raise ValueError("Model store not set")
         return await self.model_store.get_model(model_id)
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> CompletionResponse | AsyncGenerator[CompletionResponseStreamChunk, None]:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self._get_model(model_id)
-        if model.provider_resource_id is None:
-            raise ValueError(f"Model {model_id} has no provider_resource_id set")
-
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(request)
-        else:
-            return await self._nonstream_completion(request)
-
-    async def _stream_completion(
-        self, request: CompletionRequest
-    ) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
-        params = await self._get_params(request)
-
-        async def _generate_and_convert_to_openai_compat():
-            s = await self.ollama_client.generate(**params)
-            async for chunk in s:
-                choice = OpenAICompatCompletionChoice(
-                    finish_reason=chunk["done_reason"] if chunk["done"] else None,
-                    text=chunk["response"],
-                )
-                yield OpenAICompatCompletionResponse(
-                    choices=[choice],
-                )
-
-        stream = _generate_and_convert_to_openai_compat()
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
-    async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse:
-        params = await self._get_params(request)
-        r = await self.ollama_client.generate(**params)
-
-        choice = OpenAICompatCompletionChoice(
-            finish_reason=r["done_reason"] if r["done"] else None,
-            text=r["response"],
-        )
-        response = OpenAICompatCompletionResponse(
-            choices=[choice],
-        )
-        return process_completion_response(response)
-
     async def chat_completion(
         self,
         model_id: str,
@@ -262,7 +194,7 @@ class OllamaInferenceAdapter(
         else:
             return await self._nonstream_chat_completion(request)
 
-    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest) -> dict:
         sampling_options = get_sampling_options(request.sampling_params)
         # This is needed since the Ollama API expects num_predict to be set
         # for early truncation instead of max_tokens.
@@ -272,21 +204,16 @@ class OllamaInferenceAdapter(
         input_dict: dict[str, Any] = {}
         media_present = request_has_media(request)
         llama_model = self.get_llama_model(request.model)
-        if isinstance(request, ChatCompletionRequest):
-            if media_present or not llama_model:
-                contents = [await convert_message_to_openai_dict_for_ollama(m) for m in request.messages]
-                # flatten the list of lists
-                input_dict["messages"] = [item for sublist in contents for item in sublist]
-            else:
-                input_dict["raw"] = True
-                input_dict["prompt"] = await chat_completion_request_to_prompt(
-                    request,
-                    llama_model,
-                )
+        if media_present or not llama_model:
+            contents = [await convert_message_to_openai_dict_for_ollama(m) for m in request.messages]
+            # flatten the list of lists
+            input_dict["messages"] = [item for sublist in contents for item in sublist]
         else:
-            assert not media_present, "Ollama does not support media for Completion requests"
-            input_dict["prompt"] = await completion_request_to_prompt(request)
             input_dict["raw"] = True
+            input_dict["prompt"] = await chat_completion_request_to_prompt(
+                request,
+                llama_model,
+            )
 
         if fmt := request.response_format:
             if isinstance(fmt, JsonSchemaResponseFormat):
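
The surviving _get_params keeps the Ollama-specific quirk called out in the hunk above: Ollama truncates on num_predict, not max_tokens. A minimal sketch of that mapping, hedged since only the comment (not the mapping line itself) is visible in this hunk:

# Hedged sketch of the max_tokens -> num_predict mapping Ollama expects.
sampling_options = get_sampling_options(request.sampling_params)
if sampling_options.get("max_tokens") is not None:
    sampling_options["num_predict"] = sampling_options["max_tokens"]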

View file

@@ -9,7 +9,6 @@ from typing import Any
 
 from llama_stack_client import AsyncLlamaStackClient
 
-from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.inference import (
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
@@ -86,37 +85,6 @@ class PassthroughInferenceAdapter(Inference):
             provider_data=provider_data,
         )
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        client = self._get_client()
-        model = await self.model_store.get_model(model_id)
-
-        request_params = {
-            "model_id": model.provider_resource_id,
-            "content": content,
-            "sampling_params": sampling_params,
-            "response_format": response_format,
-            "stream": stream,
-            "logprobs": logprobs,
-        }
-
-        request_params = {key: value for key, value in request_params.items() if value is not None}
-
-        # cast everything to json dict
-        json_params = self.cast_value_to_json_dict(request_params)
-
-        # only pass through the not None params
-        return await client.inference.completion(**json_params)
-
     async def chat_completion(
         self,
         model_id: str,

View file

@@ -10,13 +10,9 @@ from collections.abc import AsyncGenerator
 from huggingface_hub import AsyncInferenceClient, HfApi
 from pydantic import SecretStr
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
-    CompletionRequest,
     Inference,
     LogProbConfig,
     Message,
@@ -44,13 +40,10 @@ from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_model_input_info,
-    completion_request_to_prompt_model_input_info,
 )
 
 from .config import InferenceAPIImplConfig, InferenceEndpointImplConfig, TGIImplConfig
@@ -122,31 +115,6 @@ class _HfAdapter(
     async def unregister_model(self, model_id: str) -> None:
         pass
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self.model_store.get_model(model_id)
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(request)
-        else:
-            return await self._nonstream_completion(request)
-
     def _get_max_new_tokens(self, sampling_params, input_tokens):
         return min(
             sampling_params.max_tokens or (self.max_tokens - input_tokens),
@@ -180,53 +148,6 @@ class _HfAdapter(
 
         return options
 
-    async def _get_params_for_completion(self, request: CompletionRequest) -> dict:
-        prompt, input_tokens = await completion_request_to_prompt_model_input_info(request)
-
-        return dict(
-            prompt=prompt,
-            stream=request.stream,
-            details=True,
-            max_new_tokens=self._get_max_new_tokens(request.sampling_params, input_tokens),
-            stop_sequences=["<|eom_id|>", "<|eot_id|>"],
-            **self._build_options(request.sampling_params, request.response_format),
-        )
-
-    async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
-        params = await self._get_params_for_completion(request)
-
-        async def _generate_and_convert_to_openai_compat():
-            s = await self.hf_client.text_generation(**params)
-            async for chunk in s:
-                token_result = chunk.token
-                finish_reason = None
-                if chunk.details:
-                    finish_reason = chunk.details.finish_reason
-
-                choice = OpenAICompatCompletionChoice(text=token_result.text, finish_reason=finish_reason)
-                yield OpenAICompatCompletionResponse(
-                    choices=[choice],
-                )
-
-        stream = _generate_and_convert_to_openai_compat()
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
-    async def _nonstream_completion(self, request: CompletionRequest) -> AsyncGenerator:
-        params = await self._get_params_for_completion(request)
-        r = await self.hf_client.text_generation(**params)
-
-        choice = OpenAICompatCompletionChoice(
-            finish_reason=r.details.finish_reason,
-            text="".join(t.text for t in r.details.tokens),
-        )
-        response = OpenAICompatCompletionResponse(
-            choices=[choice],
-        )
-        return process_completion_response(response)
-
     async def chat_completion(
         self,
         model_id: str,

View file

@@ -10,13 +10,9 @@ from openai import AsyncOpenAI
 from together import AsyncTogether
 from together.constants import BASE_URL
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
-    CompletionRequest,
     Inference,
     LogProbConfig,
     Message,
@@ -39,13 +35,10 @@ from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
-    completion_request_to_prompt,
     request_has_media,
 )
@@ -81,31 +74,6 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need
     async def shutdown(self) -> None:
         pass
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self.model_store.get_model(model_id)
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(request)
-        else:
-            return await self._nonstream_completion(request)
-
     def _get_client(self) -> AsyncTogether:
         together_api_key = None
         config_api_key = self.config.api_key.get_secret_value() if self.config.api_key else None
@@ -127,19 +95,6 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need
             api_key=together_client.api_key,
         )
 
-    async def _nonstream_completion(self, request: CompletionRequest) -> ChatCompletionResponse:
-        params = await self._get_params(request)
-        client = self._get_client()
-        r = await client.completions.create(**params)
-        return process_completion_response(r)
-
-    async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
-        params = await self._get_params(request)
-        client = self._get_client()
-        stream = await client.completions.create(**params)
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
     def _build_options(
         self,
         sampling_params: SamplingParams | None,
@@ -219,18 +174,14 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest) -> dict:
         input_dict = {}
         media_present = request_has_media(request)
         llama_model = self.get_llama_model(request.model)
-        if isinstance(request, ChatCompletionRequest):
-            if media_present or not llama_model:
-                input_dict["messages"] = [await convert_message_to_openai_dict(m) for m in request.messages]
-            else:
-                input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model)
+        if media_present or not llama_model:
+            input_dict["messages"] = [await convert_message_to_openai_dict(m) for m in request.messages]
         else:
-            assert not media_present, "Together does not support media for Completion requests"
-            input_dict["prompt"] = await completion_request_to_prompt(request)
+            input_dict["prompt"] = await chat_completion_request_to_prompt(request, llama_model)
 
         params = {
             "model": request.model,

View file

@@ -15,7 +15,6 @@ from openai.types.chat.chat_completion_chunk import (
 )
 
 from llama_stack.apis.common.content_types import (
-    InterleavedContent,
     TextDelta,
     ToolCallDelta,
     ToolCallParseStatus,
@@ -27,9 +26,6 @@ from llama_stack.apis.inference import (
     ChatCompletionResponseEventType,
     ChatCompletionResponseStreamChunk,
     CompletionMessage,
-    CompletionRequest,
-    CompletionResponse,
-    CompletionResponseStreamChunk,
     GrammarResponseFormat,
     Inference,
     JsonSchemaResponseFormat,
@@ -64,14 +60,8 @@ from llama_stack.providers.utils.inference.openai_compat import (
     convert_tool_call,
     get_sampling_options,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
-from llama_stack.providers.utils.inference.prompt_adapter import (
-    completion_request_to_prompt,
-    request_has_media,
-)
 
 from .config import VLLMInferenceAdapterConfig
@@ -363,33 +353,6 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
     def get_extra_client_params(self):
         return {"http_client": httpx.AsyncClient(verify=self.config.tls_verify)}
 
-    async def completion(  # type: ignore[override]  # Return type more specific than base class which is allows for both streaming and non-streaming responses.
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> CompletionResponse | AsyncGenerator[CompletionResponseStreamChunk, None]:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self._get_model(model_id)
-        if model.provider_resource_id is None:
-            raise ValueError(f"Model {model_id} has no provider_resource_id set")
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(request)
-        else:
-            return await self._nonstream_completion(request)
-
     async def chat_completion(
         self,
         model_id: str,
@@ -474,24 +437,6 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
         async for chunk in res:
             yield chunk
 
-    async def _nonstream_completion(self, request: CompletionRequest) -> CompletionResponse:
-        if self.client is None:
-            raise RuntimeError("Client is not initialized")
-        params = await self._get_params(request)
-        r = await self.client.completions.create(**params)
-        return process_completion_response(r)
-
-    async def _stream_completion(
-        self, request: CompletionRequest
-    ) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
-        if self.client is None:
-            raise RuntimeError("Client is not initialized")
-        params = await self._get_params(request)
-        stream = await self.client.completions.create(**params)
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
     async def register_model(self, model: Model) -> Model:
         try:
             model = await self.register_helper.register_model(model)
@@ -511,7 +456,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
         )
         return model
 
-    async def _get_params(self, request: ChatCompletionRequest | CompletionRequest) -> dict:
+    async def _get_params(self, request: ChatCompletionRequest) -> dict:
         options = get_sampling_options(request.sampling_params)
         if "max_tokens" not in options:
             options["max_tokens"] = self.config.max_tokens
@@ -521,11 +466,7 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
         if isinstance(request, ChatCompletionRequest) and request.tools:
             input_dict = {"tools": _convert_to_vllm_tools_in_request(request.tools)}
 
-        if isinstance(request, ChatCompletionRequest):
-            input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages]
-        else:
-            assert not request_has_media(request), "vLLM does not support media for Completion requests"
-            input_dict["prompt"] = await completion_request_to_prompt(request)
+        input_dict["messages"] = [await convert_message_to_openai_dict(m, download=True) for m in request.messages]
 
         if fmt := request.response_format:
             if isinstance(fmt, JsonSchemaResponseFormat):

View file

@@ -11,7 +11,6 @@ from ibm_watsonx_ai.foundation_models import Model
 from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
 from openai import AsyncOpenAI
 
-from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -43,8 +42,6 @@ from llama_stack.providers.utils.inference.openai_compat import (
     prepare_openai_completion_params,
     process_chat_completion_response,
     process_chat_completion_stream_response,
-    process_completion_response,
-    process_completion_stream_response,
 )
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
@@ -87,31 +84,6 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
     async def shutdown(self) -> None:
         pass
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        if sampling_params is None:
-            sampling_params = SamplingParams()
-        model = await self.model_store.get_model(model_id)
-        request = CompletionRequest(
-            model=model.provider_resource_id,
-            content=content,
-            sampling_params=sampling_params,
-            response_format=response_format,
-            stream=stream,
-            logprobs=logprobs,
-        )
-        if stream:
-            return self._stream_completion(request)
-        else:
-            return await self._nonstream_completion(request)
-
     def _get_client(self, model_id) -> Model:
         config_api_key = self._config.api_key.get_secret_value() if self._config.api_key else None
         config_url = self._config.url
@@ -128,40 +100,6 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
         )
         return self._openai_client
 
-    async def _nonstream_completion(self, request: CompletionRequest) -> ChatCompletionResponse:
-        params = await self._get_params(request)
-        r = self._get_client(request.model).generate(**params)
-        choices = []
-        if "results" in r:
-            for result in r["results"]:
-                choice = OpenAICompatCompletionChoice(
-                    finish_reason=result["stop_reason"] if result["stop_reason"] else None,
-                    text=result["generated_text"],
-                )
-                choices.append(choice)
-        response = OpenAICompatCompletionResponse(
-            choices=choices,
-        )
-        return process_completion_response(response)
-
-    async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator:
-        params = await self._get_params(request)
-
-        async def _generate_and_convert_to_openai_compat():
-            s = self._get_client(request.model).generate_text_stream(**params)
-            for chunk in s:
-                choice = OpenAICompatCompletionChoice(
-                    finish_reason=None,
-                    text=chunk,
-                )
-                yield OpenAICompatCompletionResponse(
-                    choices=[choice],
-                )
-
-        stream = _generate_and_convert_to_openai_compat()
-        async for chunk in process_completion_stream_response(stream):
-            yield chunk
-
     async def chat_completion(
         self,
         model_id: str,

View file

@@ -4,14 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from collections.abc import AsyncGenerator, AsyncIterator
+from collections.abc import AsyncIterator
 from typing import Any
 
 import litellm
 
-from llama_stack.apis.common.content_types import (
-    InterleavedContent,
-)
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
@@ -62,7 +59,7 @@ class LiteLLMOpenAIMixin(
         self,
         litellm_provider_name: str,
         api_key_from_config: str | None,
-        provider_data_api_key_field: str,
+        provider_data_api_key_field: str | None = None,
         model_entries: list[ProviderModelEntry] | None = None,
         openai_compat_api_base: str | None = None,
         download_images: bool = False,
@@ -73,7 +70,7 @@ class LiteLLMOpenAIMixin(
 
         :param model_entries: The model entries to register.
         :param api_key_from_config: The API key to use from the config.
-        :param provider_data_api_key_field: The field in the provider data that contains the API key.
+        :param provider_data_api_key_field: The field in the provider data that contains the API key (optional).
         :param litellm_provider_name: The name of the provider, used for model lookups.
        :param openai_compat_api_base: The base URL for OpenAI compatibility, or None if not using OpenAI compatibility.
        :param download_images: Whether to download images and convert to base64 for message conversion.
@@ -108,17 +105,6 @@ class LiteLLMOpenAIMixin(
             else model_id
         )
 
-    async def completion(
-        self,
-        model_id: str,
-        content: InterleavedContent,
-        sampling_params: SamplingParams | None = None,
-        response_format: ResponseFormat | None = None,
-        stream: bool | None = False,
-        logprobs: LogProbConfig | None = None,
-    ) -> AsyncGenerator:
-        raise NotImplementedError("LiteLLM does not support completion requests")
-
     async def chat_completion(
         self,
         model_id: str,

View file

@@ -63,7 +63,7 @@ class ModelRegistryHelper(ModelsProtocolPrivate):
         model_entries: list[ProviderModelEntry] | None = None,
         allowed_models: list[str] | None = None,
     ):
-        self.allowed_models = allowed_models
+        self.allowed_models = allowed_models if allowed_models else []
 
         self.alias_to_provider_id_map = {}
         self.provider_id_to_llama_model_map = {}
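
Defaulting to an empty list matters because callers treat allowed_models as a collection (the OpenAIMixin comment below reads "if empty all models allowed"); with None, a membership test raises. A small illustration, hedged since the constructor call shown is a sketch:

# Hedged sketch: with the fix, membership checks are safe on a fresh helper.
helper = ModelRegistryHelper(model_entries=None, allowed_models=None)
assert helper.allowed_models == []                  # previously None
assert "some-model" not in helper.allowed_models    # "in None" would raise TypeError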

View file

@@ -103,8 +103,6 @@ from llama_stack.apis.inference import (
     JsonSchemaResponseFormat,
     Message,
     OpenAIChatCompletion,
-    OpenAICompletion,
-    OpenAICompletionChoice,
     OpenAIEmbeddingData,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
@@ -1281,76 +1279,6 @@ async def prepare_openai_completion_params(**params):
     return completion_params
 
 
-class OpenAICompletionToLlamaStackMixin:
-    async def openai_completion(
-        self,
-        model: str,
-        prompt: str | list[str] | list[int] | list[list[int]],
-        best_of: int | None = None,
-        echo: bool | None = None,
-        frequency_penalty: float | None = None,
-        logit_bias: dict[str, float] | None = None,
-        logprobs: bool | None = None,
-        max_tokens: int | None = None,
-        n: int | None = None,
-        presence_penalty: float | None = None,
-        seed: int | None = None,
-        stop: str | list[str] | None = None,
-        stream: bool | None = None,
-        stream_options: dict[str, Any] | None = None,
-        temperature: float | None = None,
-        top_p: float | None = None,
-        user: str | None = None,
-        guided_choice: list[str] | None = None,
-        prompt_logprobs: int | None = None,
-        suffix: str | None = None,
-    ) -> OpenAICompletion:
-        if stream:
-            raise ValueError(f"{self.__class__.__name__} doesn't support streaming openai completions")
-
-        # This is a pretty hacky way to do emulate completions -
-        # basically just de-batches them...
-        prompts = [prompt] if not isinstance(prompt, list) else prompt
-
-        sampling_params = _convert_openai_sampling_params(
-            max_tokens=max_tokens,
-            temperature=temperature,
-            top_p=top_p,
-        )
-
-        choices = []
-        # "n" is the number of completions to generate per prompt
-        n = n or 1
-        for _i in range(0, n):
-            # and we may have multiple prompts, if batching was used
-            for prompt in prompts:
-                result = self.completion(
-                    model_id=model,
-                    content=prompt,
-                    sampling_params=sampling_params,
-                )
-
-                index = len(choices)
-                text = result.content
-                finish_reason = _convert_stop_reason_to_openai_finish_reason(result.stop_reason)
-
-                choice = OpenAICompletionChoice(
-                    index=index,
-                    text=text,
-                    finish_reason=finish_reason,
-                )
-                choices.append(choice)
-
-        return OpenAICompletion(
-            id=f"cmpl-{uuid.uuid4()}",
-            choices=choices,
-            created=int(time.time()),
-            model=model,
-            object="text_completion",
-        )
-
-
 class OpenAIChatCompletionToLlamaStackMixin:
     async def openai_chat_completion(
         self,
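
For reference, the response envelope the removed mixin assembled is the same OpenAI-style shape that providers now return directly from openai_completion. A minimal construction sketch grounded in the removed code above (field values illustrative):

# Hedged sketch: the OpenAI-style completion envelope, as assembled above.
import time
import uuid

completion = OpenAICompletion(
    id=f"cmpl-{uuid.uuid4()}",
    choices=[OpenAICompletionChoice(index=0, text="Hello!", finish_reason="stop")],
    created=int(time.time()),
    model="example-model",
    object="text_completion",
)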

View file

@@ -24,6 +24,7 @@ from llama_stack.apis.inference import (
     OpenAIResponseFormatParam,
 )
 from llama_stack.apis.models import ModelType
+from llama_stack.core.request_headers import NeedsRequestProviderData
 from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack.providers.utils.inference.openai_compat import prepare_openai_completion_params
@@ -32,7 +33,7 @@ from llama_stack.providers.utils.inference.prompt_adapter import localize_image_
 
 logger = get_logger(name=__name__, category="providers::utils")
 
-class OpenAIMixin(ModelRegistryHelper, ABC):
+class OpenAIMixin(ModelRegistryHelper, NeedsRequestProviderData, ABC):
     """
     Mixin class that provides OpenAI-specific functionality for inference providers.
     This class handles direct OpenAI API calls using the AsyncOpenAI client.
@@ -69,6 +70,9 @@ class OpenAIMixin(ModelRegistryHelper, ABC):
     # List of allowed models for this provider, if empty all models allowed
     allowed_models: list[str] = []
 
+    # Optional field name in provider data to look for API key, which takes precedence
+    provider_data_api_key_field: str | None = None
+
     @abstractmethod
     def get_api_key(self) -> str:
         """
@@ -111,9 +115,28 @@ class OpenAIMixin(ModelRegistryHelper, ABC):
         Uses the abstract methods get_api_key() and get_base_url() which must be
         implemented by child classes.
+
+        Users can also provide the API key via the provider data header, which
+        is used instead of any config API key.
         """
+        api_key = self.get_api_key()
+
+        if self.provider_data_api_key_field:
+            provider_data = self.get_request_provider_data()
+            if provider_data and getattr(provider_data, self.provider_data_api_key_field, None):
+                api_key = getattr(provider_data, self.provider_data_api_key_field)
+
+            if not api_key:  # TODO: let get_api_key return None
+                raise ValueError(
+                    "API key is not set. Please provide a valid API key in the "
+                    "provider data header, e.g. x-llamastack-provider-data: "
+                    f'{{"{self.provider_data_api_key_field}": "<API_KEY>"}}, '
+                    "or in the provider config."
+                )
+
         return AsyncOpenAI(
-            api_key=self.get_api_key(),
+            api_key=api_key,
             base_url=self.get_base_url(),
             **self.get_extra_client_params(),
         )
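
A provider opts into the per-request override by setting provider_data_api_key_field; the header value then takes precedence over the configured key when the client is built. A hedged sketch of the opt-in (class name, field name, URL, and config attribute are placeholders):

# Hedged sketch: opting a provider into per-request API keys.
class ExampleAdapter(OpenAIMixin):
    provider_data_api_key_field: str | None = "example_api_key"  # placeholder field name

    def get_api_key(self) -> str:
        return self._config.api_key or ""  # config fallback; attribute name assumed

    def get_base_url(self) -> str:
        return "https://api.example.com/v1"

# A request may then supply:
#   x-llamastack-provider-data: {"example_api_key": "<API_KEY>"}
# and that key is used instead of the config value.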

View file

@@ -229,28 +229,6 @@ async def convert_image_content_to_url(
         return base64.b64encode(content).decode("utf-8")
 
 
-async def completion_request_to_prompt(request: CompletionRequest) -> str:
-    content = augment_content_with_response_format_prompt(request.response_format, request.content)
-    request.content = content
-    request = await convert_request_to_raw(request)
-
-    formatter = ChatFormat(tokenizer=Tokenizer.get_instance())
-    model_input = formatter.encode_content(request.content)
-    return formatter.tokenizer.decode(model_input.tokens)
-
-
-async def completion_request_to_prompt_model_input_info(
-    request: CompletionRequest,
-) -> tuple[str, int]:
-    content = augment_content_with_response_format_prompt(request.response_format, request.content)
-    request.content = content
-    request = await convert_request_to_raw(request)
-
-    formatter = ChatFormat(tokenizer=Tokenizer.get_instance())
-    model_input = formatter.encode_content(request.content)
-    return (formatter.tokenizer.decode(model_input.tokens), len(model_input.tokens))
-
-
 def augment_content_with_response_format_prompt(response_format, content):
     if fmt_prompt := response_format_prompt(response_format):
         if isinstance(content, list):

View file

@@ -264,3 +264,36 @@ def test_function_call_output_response(openai_client, client_with_models, text_m
     assert (
         "sunny" in response2.output[0].content[0].text.lower() or "warm" in response2.output[0].content[0].text.lower()
     )
+
+
+def test_function_call_output_response_with_none_arguments(openai_client, client_with_models, text_model_id):
+    """Test handling of function call outputs in responses when function does not accept arguments."""
+    if isinstance(client_with_models, LlamaStackAsLibraryClient):
+        pytest.skip("OpenAI responses are not supported when testing with library client yet.")
+    client = openai_client
+
+    # First create a response that triggers a function call
+    response = client.responses.create(
+        model=text_model_id,
+        input=[
+            {
+                "role": "user",
+                "content": "what's the current time? You MUST call the `get_current_time` function to find out.",
+            }
+        ],
+        tools=[
+            {
+                "type": "function",
+                "name": "get_current_time",
+                "description": "Get the current time",
+                "parameters": {},
+            }
+        ],
+        stream=False,
+    )
+
+    # Verify we got a function call
+    assert response.output[0].type == "function_call"
+    assert response.output[0].arguments == "{}"
+    _ = response.output[0].call_id

View file
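
The appended test ends at the call_id assertion above. A typical continuation, sketched here purely as an illustration (the tool-output string, the final assertion, and the use of previous_response_id are assumptions, not part of this commit), would feed the result back as a function_call_output item:

# Hypothetical follow-up to the test above; values are illustrative only.
response2 = client.responses.create(
    model=text_model_id,
    input=[
        {
            "type": "function_call_output",
            "call_id": response.output[0].call_id,
            "output": "The current time is 10:00 AM",
        }
    ],
    previous_response_id=response.id,
    stream=False,
)
assert "10:00" in response2.output[0].content[0].text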

@@ -21,7 +21,7 @@
     "body": {
       "__type__": "openai.types.chat.chat_completion.ChatCompletion",
       "__data__": {
-        "id": "chatcmpl-618",
+        "id": "chatcmpl-447",
         "choices": [
           {
             "finish_reason": "stop",
@@ -38,7 +38,7 @@
             }
           }
         ],
-        "created": 1759245078,
+        "created": 1759282456,
         "model": "llama-guard3:1b",
         "object": "chat.completion",
         "service_tier": null,

View file

@@ -20,15 +20,15 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama-guard3:1b",
-        "created_at": "2025-09-03T17:37:47.461886Z",
+        "created_at": "2025-09-30T17:37:24.035083658Z",
         "done": true,
         "done_reason": "stop",
-        "total_duration": 338927833,
-        "load_duration": 100895125,
+        "total_duration": 2990785181,
+        "load_duration": 52933018,
         "prompt_eval_count": 223,
-        "prompt_eval_duration": 221583042,
+        "prompt_eval_duration": 2884018743,
         "eval_count": 2,
-        "eval_duration": 12341416,
+        "eval_duration": 53216446,
         "response": "safe",
         "thinking": null,
         "context": null

View file

@@ -24,7 +24,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
        "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -39,7 +39,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921333,
+          "created": 1759254065,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -50,7 +50,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -65,7 +65,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921333,
+          "created": 1759254066,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -76,7 +76,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -91,7 +91,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921333,
+          "created": 1759254066,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -102,7 +102,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -117,7 +117,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921333,
+          "created": 1759254066,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -128,7 +128,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -143,7 +143,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921334,
+          "created": 1759254066,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -154,7 +154,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -169,7 +169,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921334,
+          "created": 1759254066,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -180,7 +180,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -195,7 +195,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921334,
+          "created": 1759254067,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -206,7 +206,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-414",
+          "id": "chatcmpl-106",
           "choices": [
             {
               "delta": {
@@ -221,7 +221,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756921334,
+          "created": 1759254067,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,

View file

@@ -1,167 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\nReturns the boiling point of a liquid in Celsius or Fahrenheit.\n\n:param liquid_name: The name of the liquid\n:param celsius: Whether to return the boiling point in Celsius\n:return: The boiling point of the liquid in Celcius or Fahrenheit\n\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n[greet_everyone(url=\"world\")]<|eot_id|><|start_header_id|>ipython<|end_header_id|>\n\nHello, world!<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.143606Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "How",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.186151Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " can",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.229036Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " I",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.271516Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " assist",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.316272Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " you",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.361005Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " further",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.404689Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "?",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:18.447699Z",
"done": true,
"done_reason": "stop",
"total_duration": 456939083,
"load_duration": 79653292,
"prompt_eval_count": 471,
"prompt_eval_duration": 71724667,
"eval_count": 8,
"eval_duration": 304859000,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}

View file

@@ -40,7 +40,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -55,7 +55,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253815,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -66,7 +66,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -81,7 +81,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253815,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -92,7 +92,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -107,7 +107,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253815,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -118,7 +118,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -133,7 +133,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253816,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -144,7 +144,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -159,7 +159,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253816,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -170,7 +170,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -185,7 +185,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253816,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -196,7 +196,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -211,7 +211,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253816,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,
@@ -222,7 +222,7 @@
       {
         "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
         "__data__": {
-          "id": "chatcmpl-921",
+          "id": "chatcmpl-629",
           "choices": [
             {
               "delta": {
@@ -237,7 +237,7 @@
               "logprobs": null
             }
           ],
-          "created": 1756920971,
+          "created": 1759253816,
           "model": "llama3.2:3b-instruct-fp16",
           "object": "chat.completion.chunk",
           "service_tier": null,

View file

@@ -21,7 +21,7 @@
     "body": {
       "__type__": "openai.types.chat.chat_completion.ChatCompletion",
       "__data__": {
-        "id": "chatcmpl-438",
+        "id": "chatcmpl-478",
         "choices": [
           {
             "finish_reason": "stop",
@@ -38,7 +38,7 @@
             }
           }
         ],
-        "created": 1759245073,
+        "created": 1759282396,
         "model": "llama-guard3:1b",
         "object": "chat.completion",
         "service_tier": null,

View file

@@ -1,41 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"prompt": "<|begin_of_text|>Complete the sentence using one word: Roses are red, violets are ",
"raw": true,
"options": {
"temperature": 0.0,
"max_tokens": 50,
"num_predict": 50
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:13.821929Z",
"done": true,
"done_reason": "stop",
"total_duration": 1907912167,
"load_duration": 90979292,
"prompt_eval_count": 18,
"prompt_eval_duration": 77350291,
"eval_count": 43,
"eval_duration": 1738568334,
"response": " _______.\n\nThe best answer is blue. The traditional nursery rhyme goes like this:\n\nRoses are red,\nViolets are blue,\nSugar is sweet,\nAnd so are you! (Or something similar.)",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}

View file

@@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho is the CEO of Meta?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:39:38.236797Z",
"done": true,
"done_reason": "stop",
"total_duration": 1296281500,
"load_duration": 283393917,
"prompt_eval_count": 23,
"prompt_eval_duration": 75453042,
"eval_count": 24,
"eval_duration": 936860125,
"response": "Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004.",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}

View file

@@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhich planet do humans live on?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:17.894986Z",
"done": true,
"done_reason": "stop",
"total_duration": 363397458,
"load_duration": 86692791,
"prompt_eval_count": 23,
"prompt_eval_duration": 68658541,
"eval_count": 6,
"eval_duration": 207389084,
"response": "Humans live on Earth.",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}

View file

@@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"What inspires neural networks?"
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
-0.0050316164,
0.07984447,
-0.15915774,
-0.015208397,
0.06857012,
-0.025208611,
0.013689548,
0.01110039,
-0.021925347,
-0.014392589,
-0.0557497,
0.048096333,
0.124248095,
0.05381016,
-0.032023083,
0.03293363,
-0.07727248,
-0.01613264,
-0.0012452743,
-0.015702942,
-0.067251004,
-0.028757395,
0.034863908,
-0.0017118178,
0.0616299,
0.021848574,
-0.022553956,
-0.033664376,
0.01553894,
0.009967761,
0.08114387,
-0.066336334,
-0.025725907,
0.0058821645,
-0.072110265,
-0.015364161,
0.031697143,
-0.015320406,
0.011826234,
0.05202543,
-0.008305483,
-0.013734584,
-0.06918373,
-0.016431326,
0.0070836195,
0.026307657,
0.021504063,
-0.053779546,
0.072037436,
-0.036065537,
0.016765,
-0.015237846,
-0.023797043,
-0.017345365,
0.081010945,
0.017555244,
0.00849005,
-0.011041562,
0.021113921,
0.0012852269,
0.05733302,
0.04459211,
-0.006820112,
0.049741834,
0.032682,
-0.018714704,
-0.047921024,
0.05474767,
0.010007742,
0.027578747,
0.01696662,
-0.0005828434,
0.02848909,
0.049656194,
0.029906206,
0.04397822,
-0.04246628,
0.01594018,
-0.029281856,
0.052589595,
0.086577676,
0.0042159576,
-0.029517883,
-0.009740598,
0.043349918,
0.044087544,
-0.02930377,
0.0024098633,
-0.030418152,
0.08221704,
0.046374217,
0.008004957,
0.017713528,
-0.034519937,
-0.034394786,
-0.019209871,
0.01361772,
-0.0012474392,
-0.06304891,
-0.03015956,
-0.026744615,
-0.04382269,
0.009914152,
-0.050125472,
0.030627307,
-0.010395332,
0.0067255315,
-0.025443034,
0.015175414,
0.011367137,
-0.004649633,
0.0003723871,
-0.010448302,
-0.0021068275,
-0.046118032,
-0.022402227,
0.01804005,
-0.025681397,
0.036584888,
0.080027714,
0.025778025,
-0.017021077,
0.00734547,
-0.007449189,
0.013060171,
0.07254409,
-0.015623211,
-0.019112717,
-0.010143475,
-0.048559416,
0.038491815,
-0.0065740654,
-0.0521703,
-0.059264045,
0.032110944,
0.061506197,
-0.048721578,
-0.03464822,
0.013747572,
0.007892225,
0.03265148,
-0.037367918,
0.024855481,
-0.01627199,
-0.01771346,
-0.035029493,
0.0013889165,
0.0036677802,
-0.029530859,
0.03162031,
-0.024760932,
0.028933072,
0.017674228,
-0.03722869,
0.063645,
-0.04195384,
-0.034291398,
-0.042508453,
-0.0026806353,
0.008954077,
0.06860229,
-0.0043270513,
0.031392172,
-0.0052816705,
-0.042464685,
-0.03767891,
0.037023526,
0.009309706,
0.03279453,
0.06322216,
-0.04550696,
0.022164896,
-0.03588774,
0.028416842,
0.050470043,
-0.0034147543,
0.0069440254,
-0.016464153,
0.03128234,
-0.046282057,
0.017499384,
-0.044354558,
0.041510575,
0.044442233,
-0.005217252,
0.011210587,
-0.01738494,
-0.0050604055,
-0.04739853,
-0.006758368,
0.010371208,
0.0031476691,
-0.047869083,
-0.031100815,
-0.049210694,
-0.026688233,
0.0077580754,
-0.022510948,
0.054258704,
0.011458622,
-0.02378493,
-0.012583161,
-0.056452923,
-0.007816392,
-0.038032427,
0.04502559,
-0.01308419,
0.043747045,
0.016204404,
-0.0041383137,
0.049442504,
0.0076792636,
-0.0021476683,
-0.021795,
-0.031687617,
0.025953416,
0.0012399888,
-0.01656653,
-0.005198368,
0.023106242,
0.026499178,
-0.007669003,
0.04550536,
-0.019885251,
-0.006509397,
-0.028927304,
-0.03770212,
-0.015793309,
0.009043467,
0.020382207,
-0.02132457,
-0.04350365,
0.030105298,
0.013326256,
0.05148862,
0.013384519,
0.08420081,
0.012137208,
0.01429465,
-0.021215776,
0.019751377,
0.010666951,
-0.0028496862,
-0.0044943816,
-0.046843883,
-0.0145780165,
0.0044858507,
-0.052179694,
-0.010133602,
0.038626175,
0.018442878,
-0.0016659115,
-0.003639202,
0.018665677,
0.053869862,
0.006519413,
-0.0063330783,
0.03512428,
-0.0033435219,
-0.050845515,
0.059054703,
-0.018078795,
0.012237686,
-0.032968126,
0.015100413,
-0.054588336,
0.015835619,
-0.03670951,
-0.012846813,
-0.01836416,
-0.024260957,
0.059409123,
0.015367348,
-0.028107207,
0.009289864,
0.037938606,
0.024906129,
0.02536807,
0.005617444,
-0.02020537,
-0.067401595,
-0.009159591,
-0.049427476,
-0.04140775,
-0.028121712,
-0.0012032806,
0.065760456,
-0.009735368,
0.024084985,
0.022508778,
0.017129708,
-0.054647677,
0.015578886,
0.017550059,
0.004188966,
-0.021639245,
0.08918487,
-0.010681521,
-0.0013267483,
-0.04089318,
0.004022531,
0.009869387,
0.03852075,
0.012265251,
-0.021414107,
-0.035589736,
-0.041858815,
0.0010829576,
-0.0052885553,
0.027289463,
-0.090056516,
0.013117442,
0.015796974,
-0.006428205,
-0.010485043,
0.03804702,
0.0019676236,
0.030326132,
0.06926383,
-0.04581391,
-0.026230657,
-0.05017411,
-0.069891036,
-0.020800032,
-0.0021375767,
0.03964166,
0.022971395,
0.009086531,
-0.0025304465,
-0.015464918,
0.042726092,
-0.006683121,
-0.008244169,
-0.016234832,
-0.0031603999,
-0.044795815,
-0.035910357,
0.053608935,
-0.006930592,
0.04424536,
-0.012017321,
0.0155857755,
-0.008697974,
-0.067098126,
-0.032931764,
0.026898768,
0.0010457109,
-0.041276965,
0.017719025,
-0.009889669,
-0.048280854,
0.009008355,
-0.008872175,
-0.01640687,
-0.0051646377,
-0.022281006,
0.041271873,
0.06915707,
0.029213337,
0.0133835655,
0.044670742,
0.0017441317,
0.013911358,
-0.03592245,
-0.060621563,
0.018041532,
0.017789826,
-0.00043342085,
0.019603321,
0.012585408,
0.034794804,
-0.0023819709,
-0.013787601,
0.05080919,
-0.044285674,
0.055536143,
-0.08918706,
-0.03900586,
-0.037006263,
0.003928892,
-0.015029967,
-0.02021197,
0.033677697,
-0.013563023,
0.037201263,
0.019805612,
-0.02354718,
-0.037705727,
0.025382977,
0.0061666463,
-0.020041076,
0.04034747,
-0.07936578,
-0.031228192,
0.035324488,
-0.054238997,
0.047006484,
0.00159503,
0.07012299,
0.007637998,
-0.018800775,
-0.053914547,
-0.050283875,
-0.034318645,
0.008452663,
0.01237047,
0.00035791937,
-0.046610557,
0.042989474,
-0.019692015,
-0.00061614456,
0.062187936,
0.04266471,
-0.050016437,
0.021421405,
-0.024854518,
0.068603024,
0.060942996,
-0.014557106,
0.03239151,
0.010247157,
0.015091995,
0.009245114,
0.02277781,
0.027239017,
0.043091062,
-0.00082639145,
0.00031364473,
-0.058441285,
-0.018276462,
0.030178891,
-0.023433916,
-0.013687651,
-0.012881733,
-0.030734714,
0.03498326,
-0.013399916,
0.04820285,
0.013932867,
0.05571984,
0.04240612,
-0.0060554333,
0.0032024565,
-0.042510703,
0.048483945,
0.08732585,
0.0027016816,
0.0011064744,
-0.09377502,
0.067491576,
0.018435383,
0.012728095,
0.029038312,
0.0040321746,
0.07395845,
0.0031073147,
0.028865123,
0.006154529,
0.03711985,
0.03329579,
-0.0040069376,
-0.011551551,
-0.053671077,
0.010432108,
-0.038892966,
-0.0003408905,
0.0007365908,
-0.047822062,
0.053264767,
0.02096518,
0.004777782,
0.0432757,
0.021553257,
-0.0026501648,
-0.0072480487,
-0.002123129,
0.061610248,
-0.01611616,
0.035909727,
0.058587678,
0.0145304715,
-0.020112783,
-0.05207282,
-0.08221201,
0.009016992,
-0.00064655097,
0.01956686,
0.018373564,
-0.013966411,
-0.022123411,
-0.0071573188,
0.033414096,
-0.04946249,
-0.0034403466,
-0.01580445,
-0.026580384,
-0.07122861,
0.04952695,
0.036092717,
-0.002789775,
0.026477033,
0.03799533,
-0.0452679,
-0.003930312,
0.018536521,
-0.01201987,
0.025422221,
-0.066111766,
-0.029471582,
0.009364392,
-0.04817774,
-0.0008147315,
-0.0148154665,
0.00984774,
-0.00092833134,
-0.03763107,
-0.020189954,
-0.024074532,
-0.023612108,
0.015350284,
0.030945191,
-0.03588645,
-0.021719966,
-0.020571873,
-0.012741516,
0.039295603,
-0.033746354,
0.0028816632,
0.048078135,
-0.0034790456,
0.04186476,
-0.016505575,
-0.056669652,
-0.0026806216,
0.04009492,
-0.016062018,
0.016597595,
-0.015369735,
0.01423482,
-0.01612097,
0.05822151,
-0.0043877237,
0.009242956,
-0.0037488444,
-0.0044891555,
-0.027579125,
-0.025424628,
0.028450571,
-0.01797597,
-0.06810425,
0.0168767,
0.0026893963,
-0.008469021,
0.012569571,
0.004442434,
-0.041943144,
-0.019236285,
-0.028779197,
0.0046836706,
-0.0365118,
0.018350676,
0.021902338,
0.03604989,
-0.006049927,
-0.037667684,
0.043027684,
-0.01943701,
0.010076409,
0.038713254,
0.07812194,
0.06597296,
-0.045489065,
0.0070664356,
0.0044989125,
-0.011527495,
-0.046050567,
0.067999,
-0.008593809,
-0.086977795,
-0.052920334,
-0.016987754,
-0.0752132,
0.029077167,
-0.024781171,
-0.00960023,
0.0056692883,
-0.039548755,
-0.013300934,
0.054275468,
-0.03491646,
-0.035587896,
-0.007802609,
-0.028378379,
-0.05615233,
-0.011850314,
-0.017397001,
-0.0525217,
-0.0003308184,
-0.040857855,
-0.021513592,
0.025556894,
0.01627368,
0.055545956,
-0.004418218,
-0.051336065,
0.0488211,
0.012719186,
0.007410796,
-0.0034307821,
0.0516907,
-0.01817577,
-0.004452086,
-0.0056198505,
-0.015632447,
0.075757094,
-0.018579062,
0.035753764,
-0.015519769,
-0.054327093,
0.01306886,
-0.019790396,
-0.036639318,
0.07008371,
0.0061804685,
0.046798132,
-0.005218823,
-0.064510226,
-0.0127003165,
0.0017728137,
0.040912032,
-0.058067385,
0.059538517,
-0.10029672,
0.002820211,
-0.07771457,
0.008914206,
0.00806939,
0.03881859,
0.017941529,
0.007458678,
0.0011317434,
-0.050489407,
-0.039054077,
0.028261676,
0.04449006,
0.010117796,
0.057966575,
0.08405063,
0.037630063,
0.0017458433,
0.07786049,
0.012527607,
0.05369065,
-0.004282323,
-0.044055793,
0.003343061,
0.02884031,
-0.057139236,
-0.030217687,
-0.0159622,
-0.04396499,
-0.00034443758,
-0.019190768,
0.0051302793,
0.005976632,
-0.05645029,
-0.0011924162,
-0.020180402,
-0.037948944,
-0.008716054,
0.035000052,
-0.041332114,
0.0021782147,
-0.0439729,
-0.032859106,
0.027919779,
0.008747301,
0.05736891,
0.013317791,
0.0012040264,
-0.0033161226,
0.018489197,
-0.0026256584,
-0.05727805,
0.023803348,
-0.012519388,
0.02669887,
0.0062565706,
-0.017575208,
-0.04754666,
-0.02628541,
-0.07511388,
0.008495705,
-0.04325911,
-0.05147621,
0.05350302,
-0.047565665,
0.029716888,
-0.017600134,
0.06251193,
-0.06014906,
0.06652642,
-0.016948748,
0.047118686,
-0.022581328,
0.008118961,
0.023824824,
-0.028134644,
-0.013040867,
-0.036118224,
-0.043649647,
0.024044087,
0.043980736,
0.09335813,
0.0065352735,
0.048652958,
0.02291362,
-0.031512454,
-0.026838718,
0.072112754,
0.029041806,
0.009871398,
-0.076643795,
0.017986268,
-0.036420677,
-0.030303614,
0.02293626,
-0.028474882,
-0.02937154,
0.01083049,
0.0067934864,
-0.031213833,
-0.04556768,
-0.0046230564,
-0.0074542915,
-0.021028588,
-0.058362946,
0.0034970073,
0.04495744,
-0.008255564,
-0.011092999,
0.026076281,
0.016826289,
-0.026028905,
-0.0025076317,
0.017507493,
0.015523931,
0.04691712,
0.011547796,
-0.038370498,
0.029770205,
-0.017786123,
-0.006200203,
0.013117157,
0.027439341,
0.017241932,
-0.063327014,
0.075111434,
0.10742071,
-0.00892997,
0.042728376,
-0.0031351764,
0.06845063,
-0.009078234,
-0.030184548,
0.04281056,
-0.037315223,
0.012807935
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 6,
"total_tokens": 6
}
}
},
"is_streaming": false
}
}

View file
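
The new recording above is an OpenAI-compatible embeddings call against a local Ollama server. For reference, a minimal client-side sketch that produces this request shape; the base_url and api_key values are assumptions, while the model, input, and encoding_format come directly from the recording:

# Assumed client setup for a local Ollama OpenAI-compatible endpoint.
from openai import OpenAI

client = OpenAI(base_url="http://0.0.0.0:11434/v1", api_key="ollama")
resp = client.embeddings.create(
    model="nomic-embed-text:137m-v1.5-fp16",
    input=["What inspires neural networks?"],
    encoding_format="float",
)
print(len(resp.data[0].embedding), resp.usage.prompt_tokens)  # vector size, token count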

@@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"Python programming language"
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
-0.012737296,
0.052157503,
-0.09865639,
-0.05476475,
0.05301662,
0.0074160905,
-0.06798324,
-0.0033211287,
-0.016955739,
-0.066146754,
-0.00029801717,
0.044583604,
0.04537025,
-0.044383764,
0.0023149354,
-0.09608677,
0.025675122,
-0.0704009,
-0.03931903,
0.06766093,
0.017914528,
-0.040849652,
0.026488103,
-0.015297751,
0.11874497,
0.020230753,
0.0105890855,
-0.0036319923,
-0.0075948774,
0.016645674,
-0.045041427,
0.004138968,
0.0004353597,
-0.02476739,
-0.044161372,
-0.06683856,
0.06450044,
-0.018002711,
0.038697395,
0.015279114,
-0.043509968,
0.009773898,
0.060179695,
-0.007329619,
0.07848926,
-0.06192075,
0.004529198,
-0.014174553,
-0.03300747,
0.021683672,
-0.020385684,
-0.035768215,
-0.043068312,
-0.013654137,
0.07617396,
0.038741313,
0.006725823,
0.011636873,
0.015038775,
-0.06120382,
0.07566976,
0.082728565,
-0.08939894,
0.04476117,
0.05678162,
-0.011741467,
0.0026016668,
0.03271547,
-0.023847334,
0.014053751,
0.030476196,
-0.06255138,
0.04260044,
-0.0026815364,
-0.0260585,
-0.007336162,
-0.020206766,
-0.04938916,
0.017385937,
0.06006105,
-0.013208199,
0.016350197,
-0.0109011745,
0.028250203,
0.04128484,
-0.06976558,
-0.042334184,
-0.0020309563,
-0.051363576,
0.020697631,
-0.06012748,
-0.0064777704,
-0.02580574,
0.004771875,
-0.064917386,
0.02215894,
-0.054416675,
0.026068965,
0.04200019,
-0.024564879,
0.0077957124,
-0.015894597,
0.060694925,
-0.048398413,
0.03545728,
0.043259352,
0.04367656,
-0.035536934,
-0.058171894,
-0.0115244435,
-0.006172969,
0.045124453,
-0.027776113,
-0.022800889,
-0.045794144,
0.0015683161,
0.02532558,
-0.0408559,
0.06885377,
0.053380273,
-0.002310288,
-0.048188288,
0.040053353,
0.048873883,
-0.018484699,
0.024138113,
-0.06406123,
0.028043946,
0.013406045,
-0.03121256,
0.04827139,
-0.022590872,
-0.044979047,
-0.009155806,
-0.0345572,
0.040470112,
-0.053579397,
-0.014609841,
0.09309223,
-0.022341968,
0.022824768,
0.027127359,
-0.023630599,
-0.014862734,
0.019149441,
-0.022489576,
0.037146494,
0.026537362,
-0.013998867,
0.023908654,
0.019494286,
0.035421006,
0.010681667,
0.04866381,
-0.00028648498,
0.0076756324,
0.01770439,
0.004861778,
0.0675088,
-0.02110296,
0.07012984,
0.011100984,
-0.015785491,
0.029732592,
-0.042797945,
-0.028424682,
0.024825025,
0.012830561,
-0.031163441,
0.0010846684,
-0.04394154,
-0.06074506,
-0.0068602944,
-0.02000956,
0.017218532,
0.016892785,
-0.016099539,
-0.011027052,
0.04092132,
-0.013812635,
-0.0171445,
-0.05161461,
0.043900732,
0.054356292,
-0.06110619,
0.010437808,
-0.010695358,
-0.038556177,
-0.022182107,
-0.013702171,
-0.02606656,
0.0417685,
-0.03564253,
-0.065730296,
-0.048234634,
-0.031294968,
0.018793715,
0.0028812673,
0.059523605,
-0.07834006,
-0.041890293,
-0.007903964,
-0.05529348,
-0.010216022,
-0.05732938,
-0.008337224,
-0.004084479,
0.0032915517,
-0.04187034,
0.01608275,
0.06422492,
0.018843329,
-0.023873901,
0.061657883,
0.0042031026,
-0.035615478,
-0.0233748,
-0.01701599,
0.011956012,
0.034292623,
0.056101177,
0.00090226205,
0.0053342264,
0.0020548122,
0.01625327,
0.028918983,
-0.066553414,
0.017591959,
-0.055340543,
0.014200978,
0.0043894285,
-0.046320267,
0.009632542,
0.026329784,
0.037263606,
0.060245816,
0.047682427,
0.044949647,
-0.010772139,
-0.041810554,
-0.031361483,
0.0073113176,
-0.030563952,
0.04529861,
-0.009128403,
-0.0051679183,
-0.004846899,
-0.009234518,
-0.017252633,
0.039498128,
-0.019625667,
-0.0402034,
-0.005365279,
0.06279761,
0.027031269,
0.02773575,
0.032350197,
0.00057488075,
0.06752743,
-0.017945373,
0.03612706,
-0.038697086,
-0.029901898,
-0.0113743795,
-0.020817084,
-0.0028207486,
-0.0037516905,
0.016709562,
0.0070552756,
-0.025101524,
0.013061921,
-0.0097264135,
0.023312164,
-0.030784104,
-0.0029193545,
-0.02444496,
0.027738145,
-0.047183525,
-0.0056739203,
0.009817768,
0.028266534,
-0.06388905,
-0.019374298,
0.04362763,
-0.0057525537,
0.010138786,
0.025025772,
0.0056975563,
-0.013095728,
-0.010737826,
0.05379437,
0.0035773406,
-0.033730775,
-0.022392886,
-0.024516208,
0.03529997,
0.04245314,
0.029541131,
0.044283565,
-0.010923522,
-0.015672298,
0.031540904,
0.049757652,
0.0134175075,
0.026056338,
-0.045238763,
0.036880285,
0.019401666,
-0.01225724,
-0.011385536,
-0.039677687,
0.012001496,
-0.018710397,
0.051085025,
-0.07968707,
0.044598807,
0.020966908,
0.024486324,
0.030820722,
-0.035817347,
-0.005985216,
-0.077220775,
0.060087338,
-0.018667521,
0.00042907865,
0.04296211,
0.010683234,
0.03383496,
-0.000113617025,
-0.034164984,
-0.012604936,
0.013022496,
0.024046391,
-0.021777937,
-0.043731887,
0.0033063248,
0.0032457314,
-0.013931376,
0.0023861264,
0.0075240964,
0.007015829,
-0.05085907,
0.042630788,
-0.02087415,
-0.007658267,
0.013132027,
0.041472685,
-0.040956587,
0.05658287,
0.04250153,
0.0021518448,
0.044045568,
-0.040921584,
0.007132343,
-0.00048801105,
-0.036380254,
0.047273647,
-0.004309134,
-0.013429063,
-0.00019902465,
-0.0004708195,
-0.029873386,
0.027239243,
-0.03529831,
-0.023228176,
0.024661895,
0.05063533,
-0.028260268,
0.01129846,
-0.0045312783,
-0.031872246,
-0.046879377,
-0.007871232,
0.004367725,
-0.017214479,
-0.015753403,
-0.078615755,
-0.014234739,
-0.025533726,
0.029994033,
0.006888315,
-0.042100083,
-0.0016963482,
0.021459604,
-0.01591483,
-0.07365999,
-0.010291573,
0.0047568013,
0.03292463,
0.043200362,
0.014325783,
-0.048490327,
-0.024439182,
0.033686552,
0.029715305,
-0.010423145,
0.013148504,
0.0008267967,
-0.027305948,
-0.0060520596,
-0.0779034,
-0.06871077,
0.03765654,
-0.023108464,
-0.027462585,
0.022435384,
-0.010619645,
-0.019606477,
0.02848785,
-0.009619229,
-0.007973983,
-0.0029784956,
0.009451803,
-0.019557634,
-0.021816052,
0.028761018,
0.027324788,
0.031654317,
-0.058149435,
0.017170029,
0.034972027,
0.027760118,
-0.010306612,
0.012620151,
0.008334629,
0.012273061,
0.029800836,
0.058904618,
0.018408349,
-0.054807078,
0.0006477238,
0.022915987,
0.03338144,
0.03668132,
-0.0071606343,
-0.0016230526,
0.022836274,
0.01099753,
-0.015486893,
0.046064902,
0.03652358,
-0.021730995,
-0.04240822,
0.007839006,
0.010131339,
0.071891285,
0.08595036,
-0.036551163,
-0.036580227,
0.027753903,
0.013721581,
0.015000481,
0.009816424,
0.033280663,
0.06401278,
0.034881614,
-0.010603335,
0.02859825,
-0.02816573,
0.07249696,
0.005746021,
-0.026890617,
-0.05659028,
-0.007152308,
-0.024288459,
-0.018561136,
-0.013725504,
-0.030577758,
0.005742889,
0.0024392854,
-0.0399384,
0.020328993,
0.039503425,
-0.042268254,
-0.022119028,
-0.034113314,
-0.030274384,
0.011519863,
0.050782666,
0.004041363,
-0.023739118,
-0.0027546436,
-0.058498923,
-0.005471496,
-0.0053262375,
0.037513364,
-0.004591814,
0.021252032,
-0.001629569,
-0.04622212,
0.047883164,
0.03736839,
0.08020275,
0.00542343,
-0.03817893,
-0.009962559,
-0.040674374,
0.09175239,
0.1028728,
0.028166553,
0.04177519,
0.019556358,
-0.044252433,
-0.015929267,
0.042483907,
-0.031323276,
0.068415634,
-0.008449004,
-0.035050288,
0.037856326,
0.055856578,
0.00058986177,
0.032994922,
0.018346844,
0.038019393,
-0.03150018,
0.009805387,
-0.03539326,
-0.09154862,
0.009951651,
0.0144051695,
-0.041230854,
-0.010663703,
-0.023963679,
-0.029891582,
0.03757397,
0.031183342,
-0.01945111,
-0.016845128,
-0.023847176,
0.047975387,
-0.023667773,
-0.04123289,
-0.020595824,
-0.048070088,
-0.062379338,
-0.049796887,
0.038511876,
0.010982749,
-0.004460679,
0.07803074,
0.02439175,
0.02101776,
-0.0038604757,
0.05022388,
0.011080523,
-0.02685521,
-0.009115208,
-0.005774415,
-0.05743546,
0.07516603,
-0.040346682,
0.0063808565,
-0.02058147,
0.010124437,
-0.029869549,
-0.005972344,
-0.025552256,
0.0043650023,
-0.043274693,
-0.035563324,
0.008438223,
0.00926376,
0.010181649,
0.0063408106,
0.030337317,
-0.018971639,
-0.03495948,
-0.018965906,
0.03824476,
-0.037335593,
-0.035132956,
-0.0004800879,
0.0031907824,
0.005043757,
0.010878841,
0.02765467,
-0.03625543,
-0.056799237,
-0.010009897,
0.07060158,
-0.031162763,
-0.018445587,
0.036646154,
-0.025019318,
-0.0059613483,
0.012737257,
0.004886132,
-0.03758108,
-0.012071592,
-0.014093439,
0.011282327,
-0.017012196,
0.020709567,
-0.010598827,
0.024100173,
-0.066286445,
-0.020624982,
-0.019746993,
-0.04389995,
-0.000542952,
-0.00042189853,
0.047723014,
-0.015338273,
-0.0014234964,
0.08354232,
-0.0323755,
0.056150857,
-0.017370827,
-0.019247927,
0.036820125,
0.019029636,
-0.0148101,
0.033162937,
0.030420834,
-0.06173969,
0.045244128,
0.010388652,
0.014610128,
-0.024237249,
-0.005471384,
-0.05329097,
0.03361388,
-0.022210777,
0.042801995,
0.021740006,
-0.04432001,
0.020300837,
0.040372755,
0.071037516,
0.0064171883,
-0.003981306,
-0.048869807,
0.0020238254,
-0.009861756,
0.006638257,
-0.033705212,
0.0005100761,
0.03717974,
0.065557785,
0.047391072,
-0.03947765,
0.0040267883,
-0.008363395,
0.0065301796,
-0.011944791,
0.033006497,
0.07639093,
-0.0033113193,
-0.05430868,
0.07391257,
0.064527504,
-0.002406421,
0.0062794937,
0.011258814,
0.014174505,
0.051364396,
-0.049812824,
-0.063861094,
0.008121674,
-0.014099882,
-0.03951206,
-0.03534859,
0.031739417,
0.068740524,
0.057014074,
0.0065806364,
0.0014213074,
-0.054351427,
-0.0045105484,
-0.007082805,
0.016566794,
-0.01276022,
-0.030325878,
0.020703789,
0.05879084,
0.018262943,
-0.024337808,
-0.056616426,
-0.018280823,
0.016159344,
-0.026617214,
-0.032240644,
-0.01484388,
0.039500516,
-0.045082357,
0.054483585,
-0.018476259,
-0.022805728,
-0.06581501,
-0.02136263,
-0.02278495,
0.0022921907,
-0.055788554,
0.043488245,
-0.017217342,
-0.019207379,
-0.03229883,
0.014165345,
0.07650592,
0.0145935565,
0.023521688,
0.011726674,
0.051898655,
-0.06092941,
0.0049421154,
0.017239925,
0.029926429,
-0.011885315,
-0.053228807,
-0.022613214,
0.021623421,
0.048251476,
0.06570422,
0.035834767,
0.032429963,
-0.05052382,
-0.046073183,
-0.04484784,
0.01433757,
0.072260626,
-0.010861808,
-0.023238782,
0.015412952,
-0.0336904,
-0.0018390296,
-0.003844745,
-0.06879578,
0.0040851673,
-0.0033650463,
0.020701468,
0.022823572,
-0.055186763,
0.030715447,
-0.0077931485,
0.057467323,
-0.031872775,
-0.04632591,
-0.058218405,
0.0021320789,
0.011682204,
0.05363371,
-0.0022989055,
0.05224489,
0.008273623,
-0.024590664,
-0.015599656,
0.0622297,
0.05610885,
-0.03643005,
-0.029709268,
-0.008453385,
-0.047318127,
0.093379706,
-0.019986182,
-0.013489889,
-0.032653943,
0.0735651,
0.052270554,
0.0009286598,
0.01696985,
-0.012898181,
-0.012480467,
-0.028892197,
-0.03233334,
-0.00919493,
-0.0477996,
-0.017610596
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 3,
"total_tokens": 3
}
}
},
"is_streaming": false
}
}

View file

@@ -22,7 +22,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.436472Z",
+        "created_at": "2025-10-01T01:34:06.144961341Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -40,7 +40,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.478138Z",
+        "created_at": "2025-10-01T01:34:06.3373667Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -58,7 +58,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.519952Z",
+        "created_at": "2025-10-01T01:34:06.532942727Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -76,7 +76,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.561433Z",
+        "created_at": "2025-10-01T01:34:06.728352251Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -94,7 +94,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.603624Z",
+        "created_at": "2025-10-01T01:34:06.924985367Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -112,7 +112,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.645851Z",
+        "created_at": "2025-10-01T01:34:07.121349528Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -130,7 +130,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.688403Z",
+        "created_at": "2025-10-01T01:34:07.318123626Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -148,7 +148,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.72991Z",
+        "created_at": "2025-10-01T01:34:07.51621183Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -166,7 +166,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.771635Z",
+        "created_at": "2025-10-01T01:34:07.715339999Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -184,7 +184,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.813711Z",
+        "created_at": "2025-10-01T01:34:07.911837801Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -202,7 +202,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.856201Z",
+        "created_at": "2025-10-01T01:34:08.111752821Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -220,7 +220,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.899048Z",
+        "created_at": "2025-10-01T01:34:08.31294106Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -238,15 +238,15 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:50.94069Z",
+        "created_at": "2025-10-01T01:34:08.520937013Z",
         "done": true,
         "done_reason": "stop",
-        "total_duration": 688370708,
-        "load_duration": 107469833,
+        "total_duration": 4447759914,
+        "load_duration": 44225114,
         "prompt_eval_count": 399,
-        "prompt_eval_duration": 74988334,
+        "prompt_eval_duration": 2025476521,
         "eval_count": 13,
-        "eval_duration": 505216458,
+        "eval_duration": 2377545768,
         "response": "",
         "thinking": null,
         "context": null

View file

@@ -22,7 +22,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.566151Z",
+        "created_at": "2025-10-01T01:35:11.444139198Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -40,7 +40,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.609308Z",
+        "created_at": "2025-10-01T01:35:11.631417419Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -58,7 +58,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.651314Z",
+        "created_at": "2025-10-01T01:35:11.837785952Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -76,7 +76,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.693185Z",
+        "created_at": "2025-10-01T01:35:12.035361735Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -94,7 +94,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.734643Z",
+        "created_at": "2025-10-01T01:35:12.231459021Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -112,7 +112,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.776343Z",
+        "created_at": "2025-10-01T01:35:12.437587336Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -130,7 +130,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.81705Z",
+        "created_at": "2025-10-01T01:35:12.645814233Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -148,7 +148,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.857959Z",
+        "created_at": "2025-10-01T01:35:12.857399802Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -166,7 +166,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.899424Z",
+        "created_at": "2025-10-01T01:35:13.069748955Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -184,7 +184,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.939218Z",
+        "created_at": "2025-10-01T01:35:13.275446646Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -202,7 +202,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:56.980065Z",
+        "created_at": "2025-10-01T01:35:13.472121232Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -220,7 +220,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.02214Z",
+        "created_at": "2025-10-01T01:35:13.665744046Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -238,7 +238,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.0628Z",
+        "created_at": "2025-10-01T01:35:13.861581737Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -256,7 +256,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.106061Z",
+        "created_at": "2025-10-01T01:35:14.057543582Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -274,7 +274,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.1492Z",
+        "created_at": "2025-10-01T01:35:14.250235864Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -292,7 +292,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.190075Z",
+        "created_at": "2025-10-01T01:35:14.440950519Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -310,7 +310,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.23178Z",
+        "created_at": "2025-10-01T01:35:14.633159237Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -328,7 +328,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.272738Z",
+        "created_at": "2025-10-01T01:35:14.824645544Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -346,7 +346,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.313855Z",
+        "created_at": "2025-10-01T01:35:15.015421713Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -364,7 +364,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.354964Z",
+        "created_at": "2025-10-01T01:35:15.21010827Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -382,7 +382,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.395971Z",
+        "created_at": "2025-10-01T01:35:15.406911964Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -400,7 +400,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.438471Z",
+        "created_at": "2025-10-01T01:35:15.599086606Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -418,7 +418,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.479796Z",
+        "created_at": "2025-10-01T01:35:15.789596143Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -436,7 +436,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.520641Z",
+        "created_at": "2025-10-01T01:35:15.981551476Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -454,7 +454,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.561511Z",
+        "created_at": "2025-10-01T01:35:16.170823008Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -472,7 +472,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.602875Z",
+        "created_at": "2025-10-01T01:35:16.361099362Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -490,7 +490,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:57.643406Z",
+        "created_at": "2025-10-01T01:35:16.554187248Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@@ -508,7 +508,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.684279Z", "created_at": "2025-10-01T01:35:16.746364193Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -526,7 +526,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.725699Z", "created_at": "2025-10-01T01:35:16.937784556Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -544,7 +544,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.766658Z", "created_at": "2025-10-01T01:35:17.130739694Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -562,7 +562,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.80738Z", "created_at": "2025-10-01T01:35:17.324485154Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -580,7 +580,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.848466Z", "created_at": "2025-10-01T01:35:17.513221988Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -598,7 +598,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.889056Z", "created_at": "2025-10-01T01:35:17.704588587Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -616,7 +616,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.931554Z", "created_at": "2025-10-01T01:35:17.89491876Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -634,7 +634,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:57.974754Z", "created_at": "2025-10-01T01:35:18.085415685Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -652,7 +652,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.016978Z", "created_at": "2025-10-01T01:35:18.291123534Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -670,7 +670,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.057942Z", "created_at": "2025-10-01T01:35:18.481091772Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -688,7 +688,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.099015Z", "created_at": "2025-10-01T01:35:18.669330853Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -706,7 +706,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.140531Z", "created_at": "2025-10-01T01:35:18.862203802Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -724,7 +724,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.181382Z", "created_at": "2025-10-01T01:35:19.050586441Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -742,7 +742,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.223318Z", "created_at": "2025-10-01T01:35:19.243400941Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -760,7 +760,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.26358Z", "created_at": "2025-10-01T01:35:19.438492404Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -778,7 +778,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.305496Z", "created_at": "2025-10-01T01:35:19.625091169Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -796,7 +796,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.347254Z", "created_at": "2025-10-01T01:35:19.817882725Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -814,7 +814,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.390044Z", "created_at": "2025-10-01T01:35:20.006228518Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -832,7 +832,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.430867Z", "created_at": "2025-10-01T01:35:20.195451511Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -850,7 +850,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.471376Z", "created_at": "2025-10-01T01:35:20.38583856Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -868,7 +868,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.51208Z", "created_at": "2025-10-01T01:35:20.574736342Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -886,7 +886,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.553226Z", "created_at": "2025-10-01T01:35:20.770260046Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -904,7 +904,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.594787Z", "created_at": "2025-10-01T01:35:20.961391185Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -922,7 +922,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.63466Z", "created_at": "2025-10-01T01:35:21.15136915Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -940,7 +940,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.674628Z", "created_at": "2025-10-01T01:35:21.34012064Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -958,7 +958,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.714616Z", "created_at": "2025-10-01T01:35:21.530394237Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -976,7 +976,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.754906Z", "created_at": "2025-10-01T01:35:21.721043618Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -994,7 +994,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.795048Z", "created_at": "2025-10-01T01:35:21.911611623Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1012,7 +1012,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.835297Z", "created_at": "2025-10-01T01:35:22.100940877Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1030,7 +1030,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.875738Z", "created_at": "2025-10-01T01:35:22.289910353Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1048,7 +1048,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.91604Z", "created_at": "2025-10-01T01:35:22.476827205Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1066,7 +1066,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.956596Z", "created_at": "2025-10-01T01:35:22.663529325Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1084,7 +1084,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:58.996664Z", "created_at": "2025-10-01T01:35:22.851128482Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1102,7 +1102,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.037796Z", "created_at": "2025-10-01T01:35:23.042424694Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1120,7 +1120,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.078586Z", "created_at": "2025-10-01T01:35:23.234415016Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1138,7 +1138,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.119448Z", "created_at": "2025-10-01T01:35:23.422767727Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1156,7 +1156,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.160318Z", "created_at": "2025-10-01T01:35:23.611953916Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1174,7 +1174,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.201852Z", "created_at": "2025-10-01T01:35:23.802138602Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1192,7 +1192,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.243763Z", "created_at": "2025-10-01T01:35:23.993446989Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1210,7 +1210,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.284948Z", "created_at": "2025-10-01T01:35:24.186705934Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1228,7 +1228,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.325598Z", "created_at": "2025-10-01T01:35:24.39236955Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1246,7 +1246,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.366289Z", "created_at": "2025-10-01T01:35:24.579916625Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1264,7 +1264,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.406764Z", "created_at": "2025-10-01T01:35:24.768821839Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1282,7 +1282,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.447922Z", "created_at": "2025-10-01T01:35:24.957792215Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1300,7 +1300,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.488486Z", "created_at": "2025-10-01T01:35:25.147895529Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1318,7 +1318,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.529Z", "created_at": "2025-10-01T01:35:25.337348777Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1336,7 +1336,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.569417Z", "created_at": "2025-10-01T01:35:25.528043056Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1354,7 +1354,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.610542Z", "created_at": "2025-10-01T01:35:25.720598024Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1372,7 +1372,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.651411Z", "created_at": "2025-10-01T01:35:25.908813849Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1390,7 +1390,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.69241Z", "created_at": "2025-10-01T01:35:26.102538985Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1408,7 +1408,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.732339Z", "created_at": "2025-10-01T01:35:26.296587284Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1426,7 +1426,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.772462Z", "created_at": "2025-10-01T01:35:26.48997969Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1444,7 +1444,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.812507Z", "created_at": "2025-10-01T01:35:26.68461717Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1462,7 +1462,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.852762Z", "created_at": "2025-10-01T01:35:26.877976002Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1480,7 +1480,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.892984Z", "created_at": "2025-10-01T01:35:27.071304424Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1498,7 +1498,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.933555Z", "created_at": "2025-10-01T01:35:27.267083009Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1516,7 +1516,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:37:59.973778Z", "created_at": "2025-10-01T01:35:27.458752902Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1534,7 +1534,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.014923Z", "created_at": "2025-10-01T01:35:27.651757232Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1552,7 +1552,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.057464Z", "created_at": "2025-10-01T01:35:27.84093711Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1570,7 +1570,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.09902Z", "created_at": "2025-10-01T01:35:28.031166547Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1588,7 +1588,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.140492Z", "created_at": "2025-10-01T01:35:28.222014814Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1606,7 +1606,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.180239Z", "created_at": "2025-10-01T01:35:28.412024854Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1624,7 +1624,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.220364Z", "created_at": "2025-10-01T01:35:28.603242201Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1642,7 +1642,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.26097Z", "created_at": "2025-10-01T01:35:28.793015428Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1660,7 +1660,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.301228Z", "created_at": "2025-10-01T01:35:28.98105341Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1678,7 +1678,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.341631Z", "created_at": "2025-10-01T01:35:29.171562052Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1696,7 +1696,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.383006Z", "created_at": "2025-10-01T01:35:29.359960218Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1714,7 +1714,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.423509Z", "created_at": "2025-10-01T01:35:29.547663965Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1732,7 +1732,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.464702Z", "created_at": "2025-10-01T01:35:29.737967784Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1750,7 +1750,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.505914Z", "created_at": "2025-10-01T01:35:29.926196503Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1768,7 +1768,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.546505Z", "created_at": "2025-10-01T01:35:30.117904197Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1786,7 +1786,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.587839Z", "created_at": "2025-10-01T01:35:30.309146475Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -1804,15 +1804,15 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:00.629018Z", "created_at": "2025-10-01T01:35:30.497677975Z",
"done": true, "done": true,
"done_reason": "stop", "done_reason": "stop",
"total_duration": 4303339291, "total_duration": 21228194411,
"load_duration": 156231250, "load_duration": 46730034,
"prompt_eval_count": 36, "prompt_eval_count": 36,
"prompt_eval_duration": 81909875, "prompt_eval_duration": 2125755306,
"eval_count": 100, "eval_count": 100,
"eval_duration": 4064559292, "eval_duration": 19055134812,
"response": "", "response": "",
"thinking": null, "thinking": null,
"context": null "context": null

File diff suppressed because it is too large


@@ -38,7 +38,7 @@
}
}
],
- "created": 1759245080,
+ "created": 1759282470,
"model": "llama-guard3:1b",
"object": "chat.completion",
"service_tier": null,


@@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-11T15:51:18.170868Z",
"done": true,
"done_reason": "stop",
"total_duration": 5240614083,
"load_duration": 9823416,
"prompt_eval_count": 21,
"prompt_eval_duration": 21000000,
"eval_count": 310,
"eval_duration": 5209000000,
"response": "This is the start of a test. I'll provide some sample data and you can try to generate metrics based on it.\n\n**Data:**\n\nLet's say we have a dataset of user interactions with an e-commerce website. The data includes:\n\n| User ID | Product Name | Purchase Date | Quantity | Price |\n| --- | --- | --- | --- | --- |\n| 1 | iPhone 13 | 2022-01-01 | 2 | 999.99 |\n| 1 | MacBook Air | 2022-01-05 | 1 | 1299.99 |\n| 2 | Samsung TV | 2022-01-10 | 3 | 899.99 |\n| 3 | iPhone 13 | 2022-01-15 | 1 | 999.99 |\n| 4 | MacBook Pro | 2022-01-20 | 2 | 1799.99 |\n\n**Task:**\n\nYour task is to generate the following metrics based on this data:\n\n1. Average order value (AOV)\n2. Conversion rate\n3. Average revenue per user (ARPU)\n4. Customer lifetime value (CLV)\n\nPlease provide your answers in a format like this:\n\n| Metric | Value |\n| --- | --- |\n| AOV | 1234.56 |\n| Conversion Rate | 0.25 |\n| ARPU | 1000.00 |\n| CLV | 5000.00 |\n\nGo ahead and generate the metrics!",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}


@@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"The secret string is foobazbar."
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
0.00044567845,
0.069345646,
-0.13331954,
-0.046871964,
0.08016425,
-0.048083987,
-0.019010393,
0.015145315,
-0.046878867,
-0.05115706,
-0.11474304,
0.058239155,
0.016648395,
0.011023492,
0.041939907,
-0.029991476,
-9.543025e-05,
-0.02533831,
-0.02011866,
-0.07322108,
0.017030168,
-0.00957343,
0.004485929,
0.017447446,
0.1246118,
0.0117449965,
0.0014033606,
0.016348116,
-0.0005036347,
-0.040095236,
0.015161008,
-0.0034678434,
-0.025513498,
0.018403651,
-0.046444066,
-0.0633152,
0.017913556,
0.027162347,
-0.027503235,
0.07005407,
-0.06677951,
0.067936614,
-0.009670534,
0.03929378,
0.026953742,
-0.04413318,
0.012423691,
0.053801637,
0.068956025,
-0.07052555,
0.072077766,
-0.026170403,
0.0569044,
-0.014713597,
0.027845478,
0.004202079,
0.013470566,
-0.048575625,
0.026492853,
0.01398613,
0.061292946,
0.018669717,
-0.03883197,
0.08187032,
0.027836354,
0.007642394,
-0.056150433,
0.023952084,
0.031071052,
-0.049114376,
0.058882445,
-0.00040445005,
-0.02008241,
0.012982363,
-0.061310835,
0.008937138,
-0.020913182,
-0.0092431,
-0.031858914,
0.014872756,
0.029764224,
-0.016896453,
0.021685613,
0.018258028,
-0.04633906,
-0.03561103,
-0.033857256,
0.019963097,
-0.03752244,
0.015296732,
-0.017445896,
-0.014324619,
0.004804526,
0.04106732,
-0.017421542,
0.0192038,
0.027671007,
0.044899814,
-0.04936399,
-0.030076561,
0.016601052,
-0.013544007,
0.042761896,
0.0024784307,
-0.0022394105,
0.013565438,
0.0022860803,
-0.00041760976,
-0.05886792,
0.0074303076,
-0.0015840015,
0.05203811,
-0.013102137,
-0.09152751,
0.025666736,
-0.0022051502,
0.022787694,
-0.02524802,
-0.00011112814,
-0.0022206625,
-0.021147829,
-0.02161167,
0.01456756,
0.025838867,
-0.01404628,
0.026200539,
-0.014191877,
0.021828128,
0.019994682,
-0.07021417,
-0.009830949,
-0.01094356,
0.011583981,
-0.0037562435,
0.032894533,
0.048460174,
-0.017713327,
0.0038000469,
0.069233336,
-0.02220729,
0.012367555,
0.010958855,
0.017700545,
-0.06432872,
0.014903545,
-0.07342504,
0.029049437,
0.01858068,
-0.019002236,
-0.030976567,
0.001063091,
0.009665964,
0.017194226,
0.014693427,
-0.004587786,
-0.02747058,
0.061187223,
0.032178245,
0.009072266,
0.046665266,
0.036214747,
0.028900135,
-0.00039593378,
0.002205184,
-0.054302886,
-0.038410567,
0.01953658,
0.07283172,
0.0063177072,
0.048450936,
-0.062249575,
0.011464932,
0.009836349,
-0.019204034,
0.0212673,
0.0026400527,
-0.031265385,
0.005496048,
0.009981116,
-0.02005659,
0.035396017,
-0.055278853,
0.044190887,
0.023812689,
-0.0602695,
0.019462213,
-0.01969013,
-0.028041134,
0.02364917,
-0.049788468,
0.0022309152,
-0.040284824,
-0.059724264,
-0.03366438,
-0.028473698,
-0.018445726,
0.02930147,
0.028754137,
0.033635426,
0.017532766,
-0.08573839,
0.04823697,
-0.027376462,
0.0056161224,
-0.012013627,
-0.021365276,
0.008281257,
-0.028078597,
0.024465317,
0.024162576,
0.075117595,
-0.06746106,
0.0036551915,
-0.01740995,
0.006771356,
-0.021181645,
-0.010371318,
-0.015649507,
-0.028625006,
0.03872479,
0.06485805,
0.04116872,
0.014413853,
-0.023209086,
0.024703778,
0.008546008,
-0.055185292,
-0.0003334275,
-0.03359408,
0.006813681,
0.026214652,
-0.094747946,
0.05505837,
0.06588719,
-0.021185499,
-0.008195226,
0.024911653,
0.06094513,
-0.011626769,
0.0052414685,
0.00221315,
0.0049781743,
-0.006753542,
0.017345196,
-0.032445163,
0.04730397,
-0.030807534,
-0.011132825,
0.019257821,
0.037375852,
-0.01791027,
0.013328558,
0.0039301207,
0.02116138,
0.022959339,
-0.034923322,
0.020886097,
-0.03162536,
0.01642531,
-0.071851775,
0.0043929643,
-0.038616575,
0.013561031,
-0.046020526,
-0.009411261,
-0.01872071,
-0.004853035,
0.017835563,
0.016219897,
-0.040965024,
-0.015721563,
-0.011120184,
0.002712119,
-0.013525761,
-0.017541371,
0.002172893,
0.047437634,
-0.00055855716,
-0.019012688,
-0.0034372362,
-0.06898951,
-0.00070805446,
-0.066043876,
0.013205724,
-0.040814314,
0.05816519,
0.028029984,
-0.013227342,
0.0012570657,
0.0041219597,
0.053272642,
0.005242944,
-0.023647735,
0.037811704,
0.011506217,
0.019518841,
0.026147118,
0.015235484,
0.010721468,
-0.06350039,
0.03209373,
0.034801636,
0.0081500225,
0.005969703,
-0.017227497,
-0.025534213,
0.017176751,
0.039256673,
0.046966672,
0.03472027,
-0.047879733,
0.03222837,
0.03380229,
0.029047774,
-0.044715878,
0.050964445,
-0.008719146,
0.024849666,
0.06419251,
-0.030985096,
-0.018823322,
-0.054562908,
-0.00907499,
-0.10115823,
-0.024997335,
0.01242978,
-0.0019470031,
0.0333229,
-0.029330114,
-0.041030563,
0.023396686,
0.05379854,
-0.027988946,
-0.021597246,
-0.040569063,
0.04048141,
0.005340183,
0.019063592,
-0.025319468,
-0.003563014,
-0.0026412164,
-0.018177321,
0.03233157,
-0.067418195,
0.0076498054,
0.038282733,
-0.03286021,
-0.032854397,
0.046934273,
0.04355527,
-0.07515824,
0.013815288,
-0.04784709,
0.026895981,
0.0025065525,
0.025239244,
0.054204963,
-0.014532232,
0.028296318,
-0.010739294,
0.051052067,
-0.026637534,
0.0068342197,
-0.026805444,
0.02265711,
-0.007651249,
0.030557599,
-0.03413214,
-0.038503505,
0.017946247,
-0.031123659,
-0.022322055,
0.02973932,
0.011667091,
-0.014459768,
-0.028301675,
-0.11210148,
-0.00873513,
-0.017461887,
0.018714411,
0.02778843,
-0.03661049,
0.033506807,
-0.011684556,
0.01726771,
-0.003502183,
-0.0037348305,
-0.023243207,
0.05685141,
0.04693209,
-0.025070677,
-0.00013908459,
-0.027548794,
0.018317811,
-0.0178067,
0.0014910959,
0.01803822,
0.01608141,
0.007222165,
-0.0014852714,
-0.046118837,
-0.0026458004,
0.039712854,
-0.002699,
-0.04608312,
0.056430176,
0.005960536,
-0.04096914,
0.07490523,
-0.040113874,
0.050887205,
-0.0050432947,
0.025429089,
-0.040005684,
-0.016144099,
-0.027699653,
0.008637651,
-0.01148726,
-0.011380815,
0.007922618,
0.07924035,
0.063685514,
-0.0018839106,
-0.012124223,
0.0073183966,
0.00021943168,
-0.016844638,
0.043696962,
0.0029683067,
-0.040563498,
0.03907888,
0.037264947,
0.0111134555,
0.05346586,
-0.025725322,
0.023384957,
-0.060350742,
-0.026976733,
0.012131329,
0.03989188,
0.02435085,
-0.0075752987,
-0.0114409635,
0.035790615,
0.020276839,
0.07685958,
0.046703145,
-0.020972438,
-0.03259271,
0.06400826,
-0.00498698,
-0.024871409,
0.014828645,
0.0130927,
0.106245086,
-0.007118865,
0.012881113,
0.011313499,
0.0839651,
0.0125661325,
-0.0066993455,
-0.022454198,
-0.06478769,
0.020374268,
0.015577235,
-0.032526292,
0.020350832,
-0.0571311,
0.08554014,
0.08232226,
-0.037315074,
0.0021203265,
0.024621665,
-0.041138764,
0.0257467,
0.029454008,
0.01576975,
0.030322494,
-0.027369676,
0.035611905,
-0.033540208,
0.03968557,
-0.057308182,
-0.059743047,
-0.023096878,
0.040560856,
0.014436853,
-0.025654038,
-0.018847847,
0.025198145,
0.030089647,
0.024180522,
0.0022778937,
-0.002554793,
0.0022749486,
-0.08901101,
-0.06115288,
-0.01974829,
0.026249625,
-0.0053902855,
0.0070387293,
0.02137391,
0.0016356307,
0.034444757,
0.037089553,
-0.012963089,
0.015482281,
-0.016791286,
-0.066437095,
-0.020030353,
-0.036646403,
0.0022244542,
-0.028270856,
-0.0035234697,
0.043064065,
-0.007920013,
0.06887318,
0.033386547,
-0.024132386,
0.010797932,
-0.008047283,
0.024117367,
0.014206666,
-0.04957293,
-0.06584216,
0.07456989,
0.023377368,
-0.009300324,
-0.011824271,
-0.07421093,
0.025775433,
-0.03486574,
-0.011464092,
-0.033658788,
0.04973876,
-0.008150324,
0.016183274,
0.026232768,
-0.046371486,
0.05480489,
0.012598278,
0.033995587,
-0.026970293,
-0.02781425,
0.008035459,
-0.009073307,
-0.0346637,
-0.016842574,
-0.016181363,
-0.01383546,
0.0642562,
-0.050719734,
-0.055135835,
-0.006392721,
0.004836332,
-0.02701654,
-0.0027673533,
0.020192543,
-0.0038055407,
0.016163835,
-0.0107361125,
0.01661987,
0.009653905,
0.0023535355,
-0.0033649358,
-0.053976573,
0.018550616,
-0.034805,
0.029848143,
0.03626025,
-0.07495047,
-0.001908639,
-0.07656478,
0.038458325,
0.029302891,
0.023092957,
-0.007622042,
-0.030261463,
-0.021329772,
-0.018646786,
0.0127468,
-0.0658906,
-0.0026415756,
-0.02147435,
-0.021851867,
0.036363255,
-0.047830794,
-0.07678409,
-0.019886537,
-0.06597324,
-0.04127708,
0.04287775,
0.024867415,
0.031287063,
-0.014819534,
0.00026204466,
-0.015248521,
0.0058353236,
-0.024796542,
-0.054158095,
0.032939717,
0.0361686,
0.047894675,
0.0028992337,
-0.030339025,
0.03422538,
0.033026263,
0.03143931,
-0.011571698,
0.009420109,
0.029710123,
0.03437753,
-0.008656629,
-0.003830146,
0.03320896,
-0.050311238,
0.0586845,
0.023397285,
-0.045850404,
-0.010823152,
0.023126738,
-0.05035062,
-0.0030130981,
-0.0052116127,
0.053729337,
-0.036006823,
-0.052962758,
-0.008728322,
-0.01685641,
0.036570363,
-0.03503138,
-0.0058037033,
-0.018182477,
-0.036445614,
-0.05576862,
0.045270767,
-0.050004005,
0.046993006,
-0.06549657,
0.015647849,
0.047161687,
-0.003219364,
-0.0043631354,
0.032075495,
-0.0034678625,
0.07055552,
0.036095902,
-0.009122484,
0.036022466,
0.006809808,
0.040848542,
0.058361802,
-0.0054787197,
0.0046539647,
0.01463279,
-0.034826387,
0.028488237,
-0.06910212,
-0.04828465,
-0.058208026,
0.043390226,
-0.031781167,
-0.016992405,
-0.03197743,
0.05476584,
0.02947553,
0.044686142,
-0.043358956,
-0.00148739,
0.003283796,
0.004783566,
-0.0059531527,
0.048087712,
-0.04270814,
0.051301256,
0.034262523,
0.055976618,
0.042672966,
-0.020190198,
-0.043155447,
-0.0010662689,
0.030956378,
-0.061135452,
-0.022980267,
0.021279445,
0.00079709163,
0.016252836,
-0.0319085,
-0.03133885,
-0.03715316,
-0.014255662,
-0.03807531,
-0.013276923,
-0.075007856,
0.029038494,
0.003576076,
-0.04630256,
-0.013997682,
-0.06467764,
0.07094117,
-0.023424728,
0.008367736,
-0.011615238,
0.019250317,
-0.062135782,
-0.02721775,
0.009017732,
-0.01770822,
0.0019154089,
-0.022779467,
0.001992755,
0.0523557,
0.0039214473,
0.02655032,
-0.0090086395,
0.048243005,
-0.007176262,
-0.01898235,
-0.0053927833,
-0.0036218057,
0.044131264,
-0.032330353,
-0.011098804,
-0.0014564599,
0.0043925233,
-0.04351347,
0.04603144,
-0.047746886,
0.047553774,
-0.01860305,
0.005971783,
-0.040747114,
0.014575995,
-0.021958629,
0.01937992,
0.0009213148,
-0.05576995,
0.051647134,
0.014199863,
-0.026313303,
0.020335903,
0.041635584,
-0.022310706,
-0.01472034,
0.019536275,
-0.0036119658,
-0.05164503,
0.034833908,
0.0007355733,
-0.016247703,
0.050653964,
-0.057264917,
-0.027475258,
0.045744468,
0.037262745,
0.020553257,
-0.010156378,
0.060023002,
0.130969,
0.0118143745,
0.008351982,
-0.037791353,
0.0017138623,
0.032201435,
-0.037822705,
-0.04097315,
-0.0012332207,
0.008696999
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 9,
"total_tokens": 9
}
}
},
"is_streaming": false
}
}


@@ -1,221 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the name of the Sun in latin?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.136699Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "The",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.177622Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Latin",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.218104Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " word",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.258837Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " for",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.299715Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " \"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.341602Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "Sun",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.385504Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.429427Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " is",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.473547Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Sol",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.516327Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ".",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:18.559332Z",
"done": true,
"done_reason": "stop",
"total_duration": 628034000,
"load_duration": 116384417,
"prompt_eval_count": 26,
"prompt_eval_duration": 87798792,
"eval_count": 11,
"eval_duration": 423189583,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@@ -1,221 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.314693Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "[",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.362989Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "get",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.408403Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_weather",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.455832Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "(location",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.50384Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "=\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.552257Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "San",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.599938Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Francisco",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.645807Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.694632Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " CA",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.743454Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\")]",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-01T20:56:51.790525Z",
"done": true,
"done_reason": "stop",
"total_duration": 687242541,
"load_duration": 131028916,
"prompt_eval_count": 324,
"prompt_eval_duration": 76000000,
"eval_count": 11,
"eval_duration": 479000000,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest trace 1<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:41:49.581065Z",
"done": true,
"done_reason": "stop",
"total_duration": 2391571708,
"load_duration": 182022958,
"prompt_eval_count": 20,
"prompt_eval_duration": 74456583,
"eval_count": 51,
"eval_duration": 2134471458,
"response": "It seems like you're trying to test the system, but I'm not sure what specific functionality or feature you'd like to test. Could you please provide more context or clarify what you're looking for? I'll do my best to assist you!",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}


@ -1,185 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"greet_everyone\",\n \"description\": \"\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"url\"],\n \"properties\": {\n \"url\": {\n \"type\": \"string\",\n \"description\": \"\"\n }\n }\n }\n },\n {\n \"name\": \"get_boiling_point\",\n \"description\": \"\nReturns the boiling point of a liquid in Celsius or Fahrenheit.\n\n:param liquid_name: The name of the liquid\n:param celsius: Whether to return the boiling point in Celsius\n:return: The boiling point of the liquid in Celcius or Fahrenheit\n\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"liquid_name\", \"celsius\"],\n \"properties\": {\n \"liquid_name\": {\n \"type\": \"string\",\n \"description\": \"\"\n },\n \"celsius\": {\n \"type\": \"boolean\",\n \"description\": \"\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nYou are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSay hi to the world. Use tools to do so.<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.476678Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "[g",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.520346Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "reet",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.563375Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_every",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.606256Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "one",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.649215Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "(url",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.692049Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "=\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.734316Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "world",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.776615Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\")]",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-07-29T23:26:17.819266Z",
"done": true,
"done_reason": "stop",
"total_duration": 5629478417,
"load_duration": 4092162625,
"prompt_eval_count": 448,
"prompt_eval_duration": 1191158583,
"eval_count": 9,
"eval_duration": 343915792,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"Python is a high-level programming language with code readability and fewer lines than C++ or Java"
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
0.011488368,
0.08907293,
-0.13142161,
-0.07895268,
0.066022865,
0.026360855,
-0.043541305,
0.00094424584,
-0.024370281,
-0.06148249,
-0.0037689947,
0.02773672,
0.047909178,
-0.02939864,
0.011469905,
-0.08921797,
0.020931536,
-0.050551064,
0.0090582725,
0.058097444,
-0.021488983,
-0.04544651,
0.0076826564,
-0.029468112,
0.07073694,
0.0072513763,
-0.020081414,
-0.038918976,
-0.012795414,
0.020122375,
-0.028875042,
-0.021430979,
0.019585375,
-0.032045633,
-0.052031405,
-0.051445574,
0.058973435,
0.010949792,
0.05854762,
0.00939292,
-0.026500102,
0.007997425,
0.027984431,
-0.033203643,
0.0765589,
-0.047847986,
0.031280704,
-0.04031829,
-0.01630044,
-0.035522394,
-0.018725617,
-0.0643683,
-0.048050657,
-0.00145174,
0.08530237,
0.046948127,
0.0035006057,
0.026577089,
0.030813558,
-0.0314474,
0.0914591,
0.07347516,
-0.068352565,
0.06653788,
0.04145198,
2.2763175e-05,
-0.032795746,
0.033711713,
-0.011662007,
-0.02500982,
0.014806517,
-0.08404245,
0.034074288,
-0.02131799,
-0.04973383,
-0.019168304,
-0.01738479,
-0.03425713,
0.011496745,
0.049627766,
-0.004454383,
-0.007553486,
-0.008571264,
0.0481393,
0.048771415,
-0.049057007,
-0.04052862,
0.008660308,
-0.023085842,
0.05831716,
-0.058200188,
-0.0007301837,
0.031119596,
-0.001510113,
-0.06288094,
0.02649031,
-0.014243082,
0.013741406,
0.029891115,
-0.035321835,
-0.0007874549,
-0.017929547,
0.040374395,
-0.05022418,
0.047420263,
0.04879514,
0.022985416,
-0.036088556,
-0.056271147,
-0.019736229,
0.010743018,
0.04579346,
-0.04893372,
-0.03254895,
-0.047786195,
0.020005278,
0.09352314,
-0.032638513,
0.05403496,
0.058746118,
0.013902004,
-0.014856816,
0.046702012,
0.062844306,
0.024965078,
0.018879883,
-0.059720308,
0.06714566,
-0.004540917,
-0.05697842,
0.028589077,
0.010315179,
-0.04169755,
-0.0070149526,
-0.029461423,
0.07288989,
-0.061704572,
-0.025856813,
0.06512719,
0.0066599897,
0.03698303,
0.021579178,
-0.012590982,
-0.0119007975,
0.03978347,
-0.02246038,
0.015831197,
0.032543052,
0.011093418,
0.023233669,
0.034819156,
0.041866884,
0.0020055538,
0.014074135,
-0.019981578,
-0.008057632,
0.034222472,
0.0023065216,
0.04555034,
0.01121874,
0.0654458,
0.03134916,
-0.055534475,
0.03950526,
-0.021282282,
-0.02630521,
0.006853609,
-0.008049126,
-0.03182186,
0.0004068945,
-0.043355547,
-0.04058918,
0.008414404,
0.0021767297,
0.0066186627,
-0.019762259,
0.014519637,
-0.039688654,
0.045692563,
-0.010994483,
-0.008208485,
-0.043101825,
0.04670997,
0.043561783,
-0.046127435,
0.01632397,
0.016273865,
-0.045867354,
-0.005587781,
-0.019087313,
-0.01733775,
0.032173995,
-0.026338268,
-0.051710702,
-0.016714055,
-0.014880144,
0.0101565225,
0.005058725,
0.035922512,
-0.06759283,
-0.038288597,
-0.036956448,
-0.054448202,
0.015715994,
-0.043900188,
0.033019233,
-0.017369132,
0.008349448,
-0.042008255,
0.010484949,
0.060232487,
0.0044189435,
-0.025377398,
0.048769046,
0.0037088217,
-0.04514013,
-0.02408241,
-0.0057313573,
-0.0054432275,
0.021014731,
0.058329135,
-0.029602995,
0.0038945777,
-0.0059355316,
0.019913401,
0.016605137,
-0.0575594,
0.014817167,
-0.036886048,
0.01452465,
-0.0056891516,
-0.038757816,
0.034209594,
0.014828261,
0.010590116,
0.04560492,
0.03606981,
0.046451095,
-0.0022792094,
-0.015315108,
0.002956709,
0.009974895,
-0.014766702,
0.029623332,
-0.041294064,
0.022859031,
-0.0059115966,
-0.03724629,
-0.00086585025,
0.036032964,
-0.017468352,
-0.0182249,
0.012723173,
0.052306913,
0.0363147,
0.029758507,
0.056407142,
0.01234964,
0.0135322865,
-0.0076179984,
0.047202323,
-0.050033085,
-0.028000338,
-0.025103243,
-0.019605383,
0.023990436,
-0.0075666127,
0.009893213,
0.0042337226,
-0.034943476,
0.019118771,
0.025516555,
0.016372621,
-0.045386784,
-0.0076442338,
-0.016714053,
0.018130064,
-0.05281019,
0.0061577633,
0.007972123,
0.039240886,
-0.031219257,
-0.043458417,
0.023760727,
-0.0019233959,
0.034131095,
0.037140265,
0.001257368,
0.008872333,
-0.017802484,
0.06634031,
-0.018231707,
-0.040559564,
-0.03670049,
-0.009176452,
0.040855963,
0.083597414,
0.015891276,
0.019406065,
-0.028079053,
-0.02434008,
0.049721453,
0.08111963,
0.034266386,
0.027706612,
-0.024156323,
0.034014143,
-0.004383591,
-0.019008825,
-0.008942543,
-0.04909622,
0.04501953,
-0.045705624,
0.072272286,
-0.07661043,
0.022335226,
0.015420332,
0.029117696,
0.042505234,
-0.022585507,
0.0039081913,
-0.086267754,
0.03733843,
-0.031266082,
-0.0068033175,
0.04029885,
-0.017780999,
0.022028906,
-0.027171975,
-0.050008755,
0.008298878,
0.011933541,
0.0152934175,
-0.015793603,
-0.0673487,
-0.0064172964,
0.037676953,
-0.018025218,
0.018773079,
0.0051527745,
0.033772994,
-0.034934085,
0.014310966,
-0.04726107,
0.004405532,
4.2734075e-05,
0.026572658,
-0.044114474,
0.031074164,
0.03071906,
-0.009484853,
0.03711684,
-0.025813565,
-0.024846341,
-0.011359158,
-0.041466694,
0.01914002,
0.0012177938,
-0.0054687117,
0.0027515932,
0.04025552,
-0.0069444985,
0.030474605,
-0.057275087,
0.004736491,
0.002789965,
0.018351864,
-0.011660434,
-0.015821503,
-0.011462616,
-0.033419356,
-0.05104818,
-0.0030111782,
0.009709,
0.010288827,
-0.022103397,
-0.0642,
-0.029997412,
-0.016013661,
-0.002303385,
0.026114397,
-0.05361758,
-0.04575494,
0.002697649,
0.02567258,
-0.061158918,
-0.012497801,
-0.017992899,
0.019593071,
0.025052099,
0.03286399,
-0.042965606,
-0.035508,
0.032446146,
0.0371789,
-0.027910959,
0.040623948,
0.017507747,
-0.053210605,
-0.00633099,
-0.04437149,
-0.069885515,
0.020052157,
-0.008017359,
-0.027566357,
0.008547149,
0.004847182,
-0.028501885,
0.015757173,
-0.012012285,
-0.005947874,
0.0176843,
0.019584997,
-0.017860798,
-0.012815542,
0.05130764,
0.020271033,
0.03307423,
-0.049778644,
0.008983508,
0.026140546,
0.06028017,
-0.017653985,
0.011345359,
0.018171743,
0.020853298,
0.0264798,
0.062104598,
0.010310946,
-0.06562607,
0.01043746,
0.034825344,
0.021020371,
0.027116027,
-0.0037368021,
0.0042153355,
0.03373333,
0.008112555,
-0.02199968,
0.057989873,
0.026363613,
-0.019325271,
-0.06458278,
0.011872044,
0.024819711,
0.06554175,
0.07610625,
-0.017614668,
-0.08674962,
0.0088432925,
-0.005442114,
0.006102016,
0.006328422,
0.0060164,
0.037999444,
-0.0014527381,
-0.01356921,
0.016244326,
-0.01457221,
0.056518734,
-0.0011039514,
0.014004817,
-0.053100053,
0.028817357,
0.0064820037,
0.0012086668,
-0.009552054,
-0.004504296,
-0.007035088,
0.0556937,
-0.01315211,
0.029669777,
0.023995124,
-0.013237353,
-0.015704637,
-0.035238434,
-0.0037444944,
0.028946487,
0.023387091,
0.016726805,
-0.013977982,
-0.03047428,
-0.04594697,
-0.00228121,
0.0007855954,
0.02124062,
-0.008536624,
0.0048718117,
-0.014064172,
-0.036988426,
0.027667416,
0.0422569,
0.04806283,
0.01843529,
-0.025697526,
-0.0524962,
-0.020671658,
0.07923146,
0.08527786,
0.028903358,
0.026692472,
0.01747058,
-0.015024007,
0.0016035172,
0.057610784,
-0.031230353,
0.06121582,
-0.047109988,
-0.03725349,
0.01860743,
0.019578215,
-0.0025576772,
-0.0060827793,
0.054300606,
0.057380572,
-0.035506696,
0.032013237,
-0.022982,
-0.08711582,
0.026141228,
0.021207755,
-0.028961299,
0.00062547013,
-0.024462542,
-0.043661416,
0.035253577,
0.009077339,
-0.014111102,
0.0058460566,
-0.019649502,
0.044755884,
-0.0044299113,
-0.037719697,
-0.012573531,
-0.057711683,
-0.047507294,
-0.0704702,
0.05821025,
0.023852421,
0.0023238708,
0.059958983,
0.045650728,
0.0035823798,
0.021182124,
0.06536029,
0.0023902277,
-0.026674217,
0.0002469645,
0.0020064032,
-0.06034399,
0.040017728,
-0.049678437,
-0.0032678086,
-0.033326782,
0.017452622,
-0.026135415,
-0.004004807,
-0.029187452,
0.008761656,
-0.04633237,
-0.031040203,
0.03361154,
0.03364455,
0.016584601,
0.033674356,
0.012560564,
-0.0359252,
-0.018261429,
-0.0010633499,
0.048224416,
-0.05129638,
-0.055718843,
0.016412761,
0.019934708,
0.014391434,
0.0043129087,
0.016390469,
-0.009737628,
-0.047240984,
-0.027559847,
0.055247765,
-0.03220373,
-0.016151046,
0.0485871,
-0.037485205,
-0.01835451,
-0.01517561,
0.004869981,
-0.01780359,
-0.015432582,
-0.009408715,
-0.0071832985,
-0.029855747,
-0.012426293,
0.005129185,
0.025689391,
-0.06732369,
-0.04262489,
-0.014908167,
-0.05464126,
0.0047209524,
0.003995236,
0.032822587,
-0.052573748,
0.0352204,
0.09358622,
-0.02966806,
0.046852604,
-0.042644933,
-0.023728022,
0.04067723,
0.027035205,
-0.014150344,
0.0060548745,
0.007615636,
-0.06135294,
0.038593236,
0.0020092153,
0.0008044259,
-0.03532518,
-0.025208732,
-0.057940982,
0.063368574,
-0.03239539,
0.042998813,
0.005380122,
-0.025621908,
0.02933094,
0.060402885,
0.06707255,
-0.06290247,
0.0044211885,
-0.034580726,
0.018173682,
-0.014258836,
-0.0009336827,
-0.045159176,
-0.000609831,
0.046511274,
0.09704431,
0.017784506,
-0.04735181,
0.042557452,
-0.0006873186,
0.0061028055,
-0.033874914,
0.040295046,
0.06600115,
0.00991167,
-0.04475665,
0.05955679,
0.05559941,
-0.0021201232,
0.008088177,
0.0036764112,
0.002953009,
0.06759343,
-0.009915477,
-0.052873727,
-0.009668077,
0.002044497,
-0.00063458836,
-0.03656217,
0.054652866,
0.03798574,
0.056606956,
-0.007915265,
0.0013049815,
-0.09499897,
-0.0070800385,
0.0244362,
-0.012560818,
-0.0042640534,
-0.022324111,
0.0035668353,
0.053489763,
-0.0023222228,
-0.01696316,
-0.04065025,
-0.02098738,
0.0114039155,
-0.016950222,
-0.007028829,
-0.022667225,
0.02366999,
-0.05761968,
0.025501445,
-0.06229779,
-0.050604578,
-0.06865873,
-0.024909278,
-0.03078067,
0.017422339,
-0.04470559,
0.02937445,
-0.0016233833,
-0.02238118,
-0.020390697,
0.000878372,
0.046922233,
-0.023016753,
0.017631982,
0.03728526,
0.048234653,
-0.03094375,
0.0164381,
0.026422715,
0.049812343,
-0.040939927,
-0.054622803,
-0.03708105,
0.035311334,
0.02719904,
0.07242579,
0.00034508843,
0.036894504,
-0.04266779,
-0.070187844,
-0.051377587,
-0.007023316,
0.057383943,
-0.018449614,
-0.020260822,
0.0012650142,
-0.0075096413,
-0.0052665956,
0.011430787,
-0.053528212,
0.032891087,
0.014585182,
0.022210846,
0.023262084,
-0.05662875,
0.050923083,
-0.042420305,
0.0149962185,
-0.031335566,
-0.025867553,
-0.0785983,
0.009070857,
0.020916311,
0.049653318,
-0.0062730005,
0.04681294,
0.0012068546,
-0.03855772,
-0.035257522,
0.04051459,
0.04250193,
-0.045821767,
-0.005271129,
-0.007447701,
-0.043520868,
0.07666238,
-0.009431352,
0.010825085,
0.004938816,
0.07231181,
0.0627917,
-0.0001364236,
0.016336551,
-0.0049293903,
0.0138295395,
-0.023893986,
-0.044587392,
-0.006986627,
-0.05745243,
-0.031931262
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 21,
"total_tokens": 21
}
}
},
"is_streaming": false
}
}


@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"machine learning and artificial intelligence"
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
-0.0055676526,
0.037607595,
-0.14074987,
-0.002804985,
0.07148354,
0.025361888,
-0.006617389,
-0.008432862,
-0.027677476,
0.033805065,
0.012552972,
0.041450765,
0.13947411,
0.04415726,
-0.018268242,
-0.010596744,
-0.05406684,
-0.023316454,
-0.01917343,
-0.007486475,
-0.008004426,
0.025822539,
0.015411618,
0.018916113,
0.07705309,
0.0058656926,
-0.058034655,
-0.007960976,
0.014135634,
0.034185696,
0.025762286,
-0.041148923,
0.020820145,
-0.0036934123,
-0.059696127,
-0.048285812,
0.09696554,
-0.006299937,
0.02855948,
0.036708932,
0.004418546,
0.033692554,
0.00014569695,
-0.004598071,
0.058664955,
0.04386636,
-0.014703874,
-0.040981304,
0.070256576,
-0.01631749,
0.04358505,
-0.01474905,
0.0053627864,
0.020751968,
0.076655865,
0.011587456,
-0.026259147,
0.0043378496,
0.03386068,
-0.060910884,
0.13739845,
0.028939046,
-0.042746805,
0.07966744,
0.031755112,
-0.0031926725,
-0.0021385243,
0.023516048,
0.011488332,
0.005949599,
-0.001006356,
-0.021689167,
0.03777627,
0.033713214,
-0.025795706,
-0.015380865,
-0.019959806,
-0.010755837,
-0.02877149,
0.084691174,
0.05146873,
-0.04077167,
0.032549243,
-0.006378473,
0.035918225,
-0.0093235485,
-0.08135541,
-0.01730062,
-0.010902666,
0.10651181,
0.02412386,
0.03772865,
0.05793197,
0.011357906,
-0.010912312,
0.0039970484,
-0.056139898,
0.0001663857,
-0.049092147,
-0.03757449,
-0.06084076,
0.021710595,
0.016426036,
-0.046211846,
0.047347162,
0.021834597,
0.0008032862,
-0.039862543,
-0.013690757,
0.02270945,
-0.00546203,
0.05374652,
-0.02116721,
-0.006679464,
-0.051961154,
-0.051756233,
-0.010277374,
-0.004740697,
0.03921549,
0.012441582,
0.00071372476,
-0.04694471,
-0.008488195,
0.005572887,
-0.012411736,
0.043588247,
-0.049042385,
0.024810083,
-0.011161265,
-0.04244215,
0.039098956,
-0.0327504,
-0.02049274,
-0.006234103,
-0.025615763,
0.0863854,
-0.053460903,
-0.05029799,
0.035151068,
0.037194397,
0.01927741,
0.024714334,
-0.0025672915,
-0.0139264995,
-0.026953243,
-0.024757806,
0.027785258,
0.029920481,
-0.09716015,
0.030207563,
0.00088082976,
0.052972272,
-0.028489286,
-0.013131309,
0.022434616,
0.00065314706,
-0.055729564,
-0.0057886294,
0.038754933,
-0.012502802,
0.033816766,
-0.026282853,
-0.023173656,
0.028089669,
-0.0050990237,
-0.0082897,
0.026175315,
0.0375448,
0.027376607,
0.020405287,
-0.043161266,
0.0006997121,
0.00033588792,
0.014482382,
0.062248748,
0.009971126,
-0.017957326,
-0.083549835,
0.04807994,
-0.050247118,
0.031104453,
-0.04614943,
0.02402854,
0.03376869,
-0.0019501477,
-0.036129188,
-0.039748054,
-0.0029756199,
-0.03683378,
-0.030606419,
-0.020958807,
0.021332651,
-0.020598978,
-0.042064365,
-0.054918192,
-0.00901248,
0.022193708,
0.009651182,
0.01736177,
-0.034221455,
-0.0044257627,
-0.03959286,
-0.056846857,
-0.023341974,
-0.036591545,
0.05263008,
0.027988793,
0.00053739984,
-0.017889682,
0.00032725866,
0.05651838,
0.03722038,
0.021961791,
-0.015104896,
-0.027406182,
-0.0062658424,
-0.0077742916,
-0.04878277,
0.013014594,
-0.029580545,
0.053123508,
-0.0060568117,
0.02311685,
-0.017863069,
0.0057518133,
0.013460052,
-0.034497164,
-0.009695958,
-0.054542456,
0.03457276,
-0.019900212,
-0.04496697,
0.07930227,
0.00061430456,
0.030719148,
0.020608494,
0.017646661,
0.055049658,
0.008732203,
0.035740122,
-0.022534488,
0.057636857,
-0.02430445,
0.011238781,
-0.056625325,
-0.031212583,
0.010821367,
-0.042455893,
0.019988628,
0.025999557,
-0.02078072,
0.027336553,
-0.032524664,
0.019674964,
0.004634663,
-0.027575325,
0.006920462,
0.00849185,
0.0072606583,
0.010830559,
0.04373721,
-0.041281823,
0.034703884,
-0.0070332997,
0.02627788,
-0.008117525,
-0.0050063096,
0.0006726745,
0.013789757,
0.007871836,
0.020251142,
0.023514729,
0.04301568,
-0.001550706,
-0.006054088,
0.029966662,
-0.004359033,
-0.028079243,
-0.013859538,
-0.017065715,
-0.056285594,
-0.030364485,
-0.067502774,
-0.028567376,
-0.0036689844,
0.013287284,
0.014196438,
0.02717507,
0.01529897,
0.04067955,
0.021112315,
0.017248038,
-0.024668692,
-0.007050553,
-0.02688864,
0.038015496,
0.03523187,
0.03283678,
0.037456103,
-0.045826677,
0.032901708,
-0.00715299,
0.0734337,
0.0036020123,
0.050221503,
-0.022508303,
-0.0161466,
-0.014337791,
0.039818697,
0.012658511,
-0.06732133,
0.0023105624,
0.013785315,
0.005420772,
0.0023928639,
-0.010279525,
-0.042494286,
0.019604988,
0.0419654,
0.010014578,
0.0131692225,
-0.08502757,
-0.06022765,
-0.012788984,
0.029492218,
0.07531082,
-0.0014149746,
0.015584036,
-0.04072224,
-0.035372414,
0.015036397,
0.023529893,
0.018885048,
-0.022172105,
-0.06258309,
-0.003607014,
0.028332703,
0.0071907504,
-0.012343301,
0.023307528,
0.057685107,
-0.0027828452,
0.004447051,
-0.01735233,
-0.016245272,
0.013801741,
-0.0029756557,
-0.013213782,
0.015396319,
-0.010235075,
-0.03276548,
0.021457301,
0.023885816,
0.004579841,
0.036322046,
0.0031928096,
0.017268742,
0.06310177,
0.044325467,
-0.007820684,
0.027840687,
-0.055998452,
0.015811397,
-0.027679825,
-0.01689621,
-0.015704138,
0.02220624,
0.0036319862,
0.016407188,
-0.0028235482,
0.05849856,
-0.008090543,
-0.0037728718,
0.06077582,
-0.027032267,
0.018484741,
-0.055906855,
-0.04504379,
-0.03492977,
-0.019317614,
-0.041188404,
0.030125722,
-0.025321875,
0.006913241,
0.038495496,
-0.012324868,
0.0005036001,
-0.040139947,
-0.0061344374,
0.0005219825,
-0.018869184,
-0.014752749,
-0.07595433,
-0.018194932,
0.012401524,
-0.027864115,
0.006789087,
-0.009565956,
0.015790598,
0.046612665,
-0.04252712,
-0.021846049,
-0.005723392,
-0.048730128,
-0.015873676,
-0.011065935,
-0.047783904,
-0.03550279,
0.06778763,
0.020498566,
0.024177074,
0.01025881,
7.263766e-06,
-0.06263741,
0.024666198,
-0.05690874,
0.021188669,
0.017749513,
-0.05817258,
0.010562816,
0.030943366,
0.0007343872,
-0.016273286,
0.00787693,
-0.036151744,
0.014707449,
0.01039333,
0.050455544,
0.004762857,
-0.040837612,
0.063730456,
-0.017636815,
-0.025875637,
-0.034493577,
-0.00932124,
0.045578275,
0.0021959038,
0.02683857,
0.020068243,
0.02964936,
0.03125028,
-0.03228684,
-0.03409907,
-0.018953461,
0.032556947,
0.121822715,
0.04707043,
-0.020557143,
-0.07898298,
0.03803513,
0.009371626,
0.011706999,
0.023257945,
0.0077813817,
0.06505699,
-0.022636045,
-0.01171062,
0.030803725,
0.03876063,
0.038833153,
0.011656127,
0.031124521,
-0.06297426,
0.020178674,
-0.022308672,
-0.012454079,
-0.0018501335,
-0.025267268,
0.03139099,
0.06506641,
-0.006600023,
0.03257224,
0.038939405,
-0.03932672,
-0.011354874,
0.013061634,
-0.025645908,
-0.03807022,
0.031546343,
0.054272447,
0.0042550326,
-0.06261923,
-0.007274197,
-0.03840224,
-0.013757855,
0.03581693,
-0.0064127482,
0.02441153,
0.0042232205,
-0.03191279,
0.043696977,
0.008361217,
0.01741963,
-0.04443982,
-0.07408706,
-0.0302928,
-0.10016659,
0.025746375,
0.01681544,
0.008698005,
-0.0004667209,
0.0087767,
-0.021100726,
0.003711238,
-0.023373105,
-0.01503881,
0.04967642,
-0.0930721,
-0.046552327,
0.09804994,
-0.013835043,
-0.0037497964,
0.039764475,
0.033894103,
0.0012048046,
-0.037988536,
0.041074146,
0.04235108,
-0.08400901,
-0.018685354,
0.07228467,
-0.010743437,
0.010808383,
0.009577177,
-0.033949137,
-0.006326134,
0.026234496,
-0.041013833,
0.038343027,
0.00084823865,
0.02851006,
0.0077916514,
-0.030147677,
-0.027760647,
0.004643397,
0.005053343,
-0.008941861,
-0.026913425,
0.042983938,
0.01717477,
0.0663102,
-0.0019370201,
0.003287294,
-0.03727856,
0.0035034667,
-0.013155771,
-0.007892782,
0.041945223,
-0.0030665628,
-0.094774075,
0.034818046,
-0.036818203,
-0.0029307893,
-0.00884741,
-0.00743541,
-0.009145366,
-0.021448582,
-0.042497415,
-0.006537858,
0.0023786393,
-0.03640427,
0.0031237768,
0.06756371,
-0.015007449,
-0.045269705,
0.025938397,
-0.0102713555,
-0.02172098,
0.0008311765,
0.032281272,
0.028380793,
-0.055843204,
0.0016028135,
0.008903928,
0.0085764015,
-0.014910333,
-0.014104748,
-0.018106278,
-0.037222672,
-0.022182018,
0.08024584,
-0.06451804,
-0.02075624,
0.020843761,
0.03523371,
0.012193457,
-0.05703897,
-0.0013516175,
0.04106061,
-0.06275497,
-0.018204994,
0.02172471,
-0.014526833,
-0.054614007,
-0.04518983,
0.016957235,
-0.023265226,
-0.027596308,
-0.023523336,
-0.059039053,
0.0041685067,
-0.039938442,
0.04669978,
-0.0063979127,
0.020483416,
0.027639873,
-0.01206512,
0.051813617,
0.049028568,
0.0068901125,
-0.035108544,
-0.011231821,
-0.014607724,
0.014760893,
0.055028442,
-0.035556052,
0.042438332,
-0.093893364,
-0.087567605,
-0.016325593,
-0.052629195,
-0.07636775,
0.032836746,
-0.015486794,
0.052163288,
-0.0035887335,
0.0029697292,
-0.015571485,
0.016206617,
0.06955324,
-0.018355895,
0.051770963,
0.016798811,
-0.04840591,
-0.027142415,
0.007742883,
-0.01505668,
0.01949886,
0.027084991,
0.07451987,
0.01707506,
-0.009305742,
-0.031197278,
0.034334995,
0.03400155,
-0.023167107,
0.041818704,
0.08864219,
-0.010490497,
-0.015371323,
0.039439347,
0.041599363,
0.010343794,
-0.031765327,
-0.043507814,
0.046278544,
0.0073079155,
-0.012219337,
0.009139992,
-0.02176212,
-0.021882698,
0.0134527,
0.0050208997,
-0.008423276,
0.041090664,
-0.020635158,
-0.036146075,
0.01049579,
-0.079392806,
-0.06501304,
0.0335013,
-0.012802067,
0.024089638,
-0.04123427,
-0.005093254,
0.04965449,
0.01900141,
0.02468455,
-0.026793627,
-0.00853688,
-0.026478257,
-0.021256402,
0.019811329,
-0.02736609,
0.0008755891,
-0.03280057,
0.05230071,
-0.024271186,
0.017648304,
-0.07038161,
-0.024559036,
-0.07172936,
-0.01706447,
-0.006269835,
-0.014418907,
0.033071198,
-0.039413814,
0.028617091,
0.05658568,
0.0631377,
-0.011613074,
0.045226514,
0.03267759,
0.04698377,
-0.054020163,
0.004418562,
0.007869039,
0.03307921,
-0.01226311,
-0.021438342,
-0.015542127,
0.017207818,
-0.023682194,
0.08018181,
-0.022875395,
-0.01348799,
-0.028109841,
-0.0451768,
-0.023686612,
0.040311582,
0.04083543,
-0.03210762,
-0.03917693,
-0.017097685,
-0.036972158,
-0.04078481,
0.02192485,
-0.026830912,
-0.011077901,
0.0045215045,
0.023708722,
-0.024511881,
-0.048116196,
0.005063682,
-0.0072107734,
0.019443877,
-0.056393813,
-0.018381938,
-0.046558794,
0.011450821,
-0.010548083,
0.0033412941,
0.04300793,
0.023570552,
0.011047298,
-0.025875632,
-0.013352994,
0.05174488,
0.021105226,
-0.01785354,
-0.0063682324,
0.01556173,
-0.05248805,
0.01078658,
-0.017563447,
0.038102563,
-0.030159717,
0.07094031,
0.12957932,
-0.009026436,
0.038504194,
-0.058084693,
0.01352246,
-0.017025255,
-0.028957661,
0.015611035,
-0.06158929,
-0.0005010816
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 5,
"total_tokens": 5
}
}
},
"is_streaming": false
}
}


@ -1,86 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. Michael Jordan was born in 1963. He played basketball for the Chicago Bulls for 15 seasons.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nPlease give me information about Michael Jordan.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nPlease respond in JSON format with the schema: {\"$defs\": {\"NBAStats\": {\"properties\": {\"year_for_draft\": {\"title\": \"Year For Draft\", \"type\": \"integer\"}, \"num_seasons_in_nba\": {\"title\": \"Num Seasons In Nba\", \"type\": \"integer\"}}, \"required\": [\"year_for_draft\", \"num_seasons_in_nba\"], \"title\": \"NBAStats\", \"type\": \"object\"}}, \"properties\": {\"first_name\": {\"title\": \"First Name\", \"type\": \"string\"}, \"last_name\": {\"title\": \"Last Name\", \"type\": \"string\"}, \"year_of_birth\": {\"title\": \"Year Of Birth\", \"type\": \"integer\"}, \"nba_stats\": {\"$ref\": \"#/$defs/NBAStats\"}}, \"required\": [\"first_name\", \"last_name\", \"year_of_birth\", \"nba_stats\"], \"title\": \"AnswerFormat\", \"type\": \"object\"}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"format": {
"$defs": {
"NBAStats": {
"properties": {
"year_for_draft": {
"title": "Year For Draft",
"type": "integer"
},
"num_seasons_in_nba": {
"title": "Num Seasons In Nba",
"type": "integer"
}
},
"required": [
"year_for_draft",
"num_seasons_in_nba"
],
"title": "NBAStats",
"type": "object"
}
},
"properties": {
"first_name": {
"title": "First Name",
"type": "string"
},
"last_name": {
"title": "Last Name",
"type": "string"
},
"year_of_birth": {
"title": "Year Of Birth",
"type": "integer"
},
"nba_stats": {
"$ref": "#/$defs/NBAStats"
}
},
"required": [
"first_name",
"last_name",
"year_of_birth",
"nba_stats"
],
"title": "AnswerFormat",
"type": "object"
},
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:40.283084Z",
"done": true,
"done_reason": "stop",
"total_duration": 2900042958,
"load_duration": 83372125,
"prompt_eval_count": 259,
"prompt_eval_duration": 352890750,
"eval_count": 60,
"eval_duration": 2462885208,
"response": "{\n \"first_name\": \"Michael\",\n \"last_name\": \"Jordan\",\n \"year_of_birth\": 1963,\n \"nba_stats\": {\n \"year_for_draft\": 1984,\n \"num_seasons_in_nba\": 15\n }\n}",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}

File diff suppressed because it is too large


@ -20,15 +20,15 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama-guard3:1b",
-        "created_at": "2025-09-03T17:38:01.239743Z",
+        "created_at": "2025-09-30T17:39:23.766462922Z",
         "done": true,
         "done_reason": "stop",
-        "total_duration": 207264667,
-        "load_duration": 73437959,
+        "total_duration": 2859320770,
+        "load_duration": 60934847,
         "prompt_eval_count": 216,
-        "prompt_eval_duration": 121657333,
+        "prompt_eval_duration": 2749991822,
         "eval_count": 2,
-        "eval_duration": 11348417,
+        "eval_duration": 47816462,
         "response": "safe",
         "thinking": null,
         "context": null


@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"This is a test file 0"
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
0.06569889,
0.0075979824,
-0.13355534,
-0.03087419,
0.06887596,
0.0022278922,
0.030457113,
0.029343065,
-0.041988637,
-0.085280016,
-0.030396713,
0.038043153,
0.025799021,
0.0029713905,
-0.028386902,
-0.027477825,
0.03623284,
-0.04154503,
0.00551161,
-0.020107845,
0.036813777,
-0.029126925,
-0.06819024,
-0.006683371,
0.12236409,
-0.0008511646,
-0.022556255,
0.051949136,
-0.07988408,
-0.032928497,
0.06524479,
0.0012762198,
-0.002292936,
-0.029198533,
-0.012377746,
-0.026174542,
0.021895576,
0.037113264,
0.03436928,
0.008258402,
-0.016730672,
-0.025307849,
0.0068733217,
-0.0034135508,
0.020250086,
0.03329193,
0.012187189,
0.076113224,
-0.019928403,
0.012776066,
0.007209404,
-0.022850547,
-0.0030079158,
0.01193757,
0.02421511,
-0.014447408,
-0.03570278,
-0.0005199167,
-0.021498382,
-0.03273841,
0.041634835,
0.0357598,
-0.051809516,
0.04717076,
0.014142166,
-0.044218663,
-0.04686818,
0.024508895,
0.0016807343,
0.03689631,
0.06549316,
-0.011174818,
-0.021753127,
0.0125305895,
-0.018603666,
-0.049111377,
-0.010490791,
-0.06439277,
-0.06457874,
-0.027793122,
0.012108071,
0.02228997,
0.023145016,
0.064356215,
0.06162452,
-0.023461625,
-0.011763129,
-0.017237727,
0.016087933,
0.026915565,
0.048432816,
0.019608956,
0.0446655,
-0.042998426,
-0.022571366,
-0.010334031,
0.022279797,
0.07883467,
-0.011191799,
-0.026524613,
0.0013984819,
0.005972282,
0.027293874,
-0.02065833,
0.0285912,
0.049571536,
-0.020621926,
0.008375827,
-0.04923765,
-0.010991332,
0.0071697976,
0.050934322,
-0.043111023,
-0.033160962,
-0.015131605,
-0.012539622,
0.041305505,
-0.033541363,
-0.041694295,
0.011190744,
0.007084672,
0.015450092,
0.042311884,
0.03940029,
0.01701689,
0.013807599,
-0.04999148,
0.0504365,
0.024707705,
-0.04813005,
-0.020354733,
0.024809042,
-0.038834315,
-0.033733364,
0.028245933,
0.0424937,
-0.013269442,
-0.025089223,
-0.02546163,
0.020151038,
-0.042214695,
0.0058155754,
0.02213424,
0.017433757,
0.05158181,
-0.02869754,
0.04465606,
0.012662332,
-0.028051574,
0.015604842,
0.050896738,
0.007599799,
0.006281129,
0.033418793,
0.021920709,
-0.07913975,
0.033958323,
-0.02553707,
0.0044211005,
0.051474363,
0.028896896,
-0.013811369,
-0.015269997,
-0.0027181397,
-0.074844725,
-0.04378042,
0.013777917,
0.0941123,
0.084751636,
-0.012578452,
-0.014671592,
-0.038143005,
-0.004176015,
0.007933388,
-0.05929473,
-0.021193247,
0.008781839,
-0.01596112,
0.026119918,
-0.025445312,
0.02648552,
-0.00568644,
0.010799765,
0.023444891,
-0.009518018,
-0.050896112,
0.01034954,
-0.02753636,
-0.03769859,
-0.03366245,
-0.009905339,
-0.045516003,
-0.068003535,
-0.07863914,
0.005519929,
-0.042954993,
-0.022231326,
-0.021004673,
0.02902556,
-0.017120933,
0.021249624,
0.02768383,
-0.06314554,
0.053207308,
-0.03886009,
0.00476874,
-0.022096757,
-0.01341045,
-0.030357309,
0.0137588475,
0.031562295,
-0.005539913,
-0.032822832,
0.034190398,
0.055425715,
-0.027244035,
0.006620907,
-0.022488393,
-0.026812593,
-0.027873514,
0.018166311,
0.003122373,
0.0018363056,
-0.027016325,
0.0046166135,
-0.0369997,
-0.034971904,
-0.018800624,
-0.0014946542,
-0.011367924,
0.0035812103,
-0.07085738,
0.033152454,
0.023359593,
-0.027913084,
-0.0077732382,
-0.048488766,
0.053926837,
-0.039162364,
0.044420574,
-0.021989806,
0.055259187,
-0.016539602,
-0.018407907,
0.007724413,
-0.020046087,
-0.023352552,
-0.047689717,
0.04136404,
0.042082027,
-0.017346364,
0.029248353,
0.031323876,
0.07688728,
-0.013567599,
-0.014497512,
-0.009294345,
-0.039481603,
-0.004710669,
-0.07827626,
0.026850224,
-0.0140288705,
0.02613264,
-0.0044927574,
-0.03384218,
-0.00079161214,
-0.056953214,
0.03628688,
-0.020171795,
-0.012991032,
-0.013236439,
0.0482173,
-0.0035148757,
-0.011471772,
0.026540088,
-0.031246386,
0.054621194,
0.059837423,
0.0044686636,
0.044278976,
-0.007069389,
-0.008574732,
0.005789034,
0.026414782,
-0.0075685466,
-0.014385823,
0.02829211,
0.017918091,
0.038316578,
0.009408247,
-0.013512078,
0.022944227,
-0.0155690005,
0.0043662353,
0.024858288,
0.035380267,
0.044127665,
-0.0147769265,
-0.0063019125,
0.0031974213,
-0.012091373,
0.02103759,
0.035669435,
-0.013142072,
0.022677507,
-0.06280885,
0.038994793,
-0.047527548,
0.010609448,
0.043443497,
-0.09725285,
-0.018532714,
-0.028497247,
0.030204087,
-0.006363635,
0.060399804,
-0.0107133705,
0.008450749,
0.05759074,
-0.04678292,
0.01396999,
-0.07399043,
0.0007504193,
0.031175617,
0.0060865046,
0.03421212,
0.023408618,
0.043368008,
-0.05970366,
-0.014861325,
0.053525794,
0.04850931,
-0.029100617,
-0.027497835,
0.044973027,
0.0405099,
0.00850536,
0.047304627,
-0.0038067936,
0.061405297,
0.03626454,
0.018543653,
0.0150030125,
0.014765505,
0.012231581,
-0.029379906,
-0.019150946,
0.019597163,
-0.007974375,
0.05469681,
-0.0018450669,
0.03555379,
0.022403168,
-0.022159277,
0.039409384,
-0.00950375,
0.015302587,
-0.002742015,
0.049243126,
-0.014761497,
0.028783482,
-0.021339092,
-0.0126494095,
-0.029378537,
0.027175143,
0.020410776,
-0.048842303,
0.012824888,
0.07513209,
0.02679242,
-0.014250363,
-0.03768017,
0.041978676,
0.06390848,
0.027395684,
0.012390605,
-0.068697326,
-0.026561985,
-0.013103001,
0.05081568,
0.056574605,
-0.03550072,
-0.0033409016,
0.041807074,
0.026001278,
-0.014371649,
0.03813918,
-0.019380845,
0.058272604,
0.031092493,
0.0054262243,
0.036123812,
-0.048604775,
0.025506865,
-0.00573351,
0.010888976,
0.044062544,
-0.0073227165,
-0.06031213,
0.02233619,
-0.011185928,
-0.020654337,
0.0056568985,
0.008660892,
-0.02760251,
0.012655247,
-0.045171466,
-0.045431744,
0.039053343,
-0.02334073,
0.051499687,
-0.037237596,
-0.036204305,
-0.0661045,
0.022786478,
0.04503965,
0.042866375,
0.049955808,
-0.0158006,
-0.006718668,
0.016262004,
0.036782544,
0.030297246,
-0.026872655,
-0.031357024,
0.008424332,
0.040544927,
0.054497696,
0.0003742172,
-0.09587798,
-0.016308863,
0.011799034,
-0.0055135977,
0.014207488,
-0.016967725,
0.08251366,
-0.011782458,
-0.0080608055,
-0.016523587,
0.04005391,
0.04516666,
-0.049395572,
-0.016308561,
0.006028617,
-0.040751286,
0.14053217,
0.10381706,
-0.07738247,
-0.044793732,
-0.008966316,
-0.02844784,
0.021164771,
-0.03330297,
-0.012639106,
0.037983377,
-0.013894287,
0.029972676,
-0.03384708,
-0.008776539,
0.033346817,
-0.0061010243,
0.0051652323,
0.06805391,
0.046029896,
0.029034972,
-0.002959955,
-0.0037809198,
-0.030130504,
-0.008491404,
0.045628317,
-0.004553677,
-0.06380821,
0.041239917,
-0.039542254,
-0.028727125,
0.007622591,
-0.015135407,
0.007827911,
0.0017602865,
0.016166357,
0.032133713,
0.0048149712,
-0.030142028,
-0.03905762,
0.04570094,
0.021713454,
-0.01015308,
0.030249437,
0.04793632,
-0.024754873,
0.057805218,
0.0062296274,
0.064786054,
0.027312867,
0.017458709,
-0.020422962,
-0.033931006,
-0.055576656,
-0.0022137442,
0.02330331,
0.013868948,
0.015872952,
0.027338386,
-0.014782425,
0.004494493,
-0.01329081,
-0.016142018,
-0.05443725,
-0.06303216,
-0.036463458,
-0.073589996,
0.00017102716,
0.027406873,
0.047198333,
0.051058855,
-0.005883208,
-0.0058205356,
-0.043531097,
-0.073391624,
0.060281724,
-0.021565571,
0.0029200057,
0.019395538,
-0.017327337,
-0.0653435,
0.025828788,
0.00382072,
-0.025127921,
0.028973421,
0.046483908,
0.02353495,
0.051256366,
0.027777418,
-0.016367994,
-0.031594142,
-0.014125466,
-0.0515892,
0.028936012,
-0.016301127,
0.064760074,
-0.042705704,
-0.03665835,
0.0058707185,
-0.036659144,
-0.023149284,
-0.04758676,
-0.060163625,
0.054598432,
-0.00078254647,
-0.112735756,
-0.0008261282,
-0.013952264,
-0.040117852,
-0.0019322386,
0.008373793,
-0.037860926,
-0.015743056,
-0.0234362,
-0.06493749,
-0.069608204,
0.029697478,
0.0013986954,
0.0041609188,
0.018288933,
0.019073283,
-0.041577518,
-0.0357768,
-0.0021765458,
-0.010237743,
-0.028734086,
0.0041319,
-0.013383362,
0.00577167,
-0.0053505367,
-0.022350835,
0.01406836,
0.034614973,
0.036873527,
-0.04093488,
-0.03230344,
0.018228276,
0.0156018995,
0.024933772,
0.02783354,
-0.0080469055,
0.023191504,
0.041615404,
-0.04611942,
0.068785064,
0.0004912869,
-0.057737023,
-0.017378213,
0.015246827,
-0.0045711,
0.024566535,
0.018834211,
-0.013144151,
-0.039206583,
-0.009895874,
-0.031059353,
-0.016976817,
0.0449504,
0.0032223936,
-0.025907526,
-0.056929037,
-0.013011389,
0.021181583,
0.0106028635,
-0.012212557,
-0.024159467,
0.054833174,
-0.018079655,
-0.06036847,
-0.019181063,
-0.0036599508,
-0.04247008,
0.06736818,
-0.05656677,
0.00063564116,
-0.030859886,
0.022682272,
-0.041298434,
0.046203904,
-0.025341783,
0.035256788,
-0.03913067,
-0.025138376,
0.021381568,
0.020233907,
0.04396407,
-0.05447175,
0.056231752,
-0.08152801,
-0.046155322,
-0.107502006,
-0.008449785,
-0.051441476,
0.02187801,
0.07710222,
0.058793396,
0.037536267,
0.022781303,
-0.021965852,
-0.025323188,
0.01036808,
0.043830823,
-0.02973099,
0.03564364,
0.010773202,
-0.052458562,
0.054098483,
0.08024228,
0.06560271,
0.0001508493,
-0.020404926,
-0.0033358065,
0.059732165,
-0.00095160346,
-0.04169797,
-0.08884556,
-0.021227196,
0.02134743,
-0.043752395,
-8.042651e-05,
-0.0033908791,
0.04362836,
-0.019251144,
-0.0071159727,
-0.01190997,
-0.05915786,
0.03255786,
0.012339297,
0.036949337,
0.015805522,
0.014613892,
0.04628766,
0.043885946,
0.07332898,
-0.020451782,
-0.016520225,
-0.0020803884,
-0.01159851,
0.0426532,
0.008053762,
0.040212996,
-0.07245195,
0.020705638,
-0.02203555,
-0.024147796,
-0.005401511,
-0.0035201178,
0.014357559,
-0.011565124,
-0.06113777,
0.00073033513,
0.004304726,
0.03700348,
-0.02675051,
0.0020004935,
0.03970252,
0.04645308,
0.031940658,
0.011803997,
0.047087885,
-0.020772861,
-0.02010736,
-0.008094346,
-0.017589118,
-0.05531338,
-0.037902128,
0.026629327,
0.014163693,
-0.028866766,
0.08358291,
-0.011674367,
0.030306904,
-0.016541358,
-0.00535445,
0.010175458,
-0.009855767,
0.051110856,
0.0030403563,
-0.04535673,
-0.007742969,
-0.008183598,
-0.0282291,
-0.028479243,
-0.018404141,
0.06131364,
-0.036709666,
-0.016097328,
-0.031855233,
-0.029608333,
0.0516191,
-0.016996393,
-0.0043252064,
-0.018871896,
-0.011307787,
-0.010877992,
0.030488119,
0.010948365,
0.029610623,
-0.032166634,
-0.032359682,
-0.020506512,
0.0050876667,
-0.009433013,
0.019670308,
-0.011595458,
0.012013566,
0.03396051,
-0.037603952,
-0.0032240797,
0.03181483,
-0.02194272,
-0.02439024,
-0.015391741,
-0.0139405355,
0.08458335,
-0.03672542,
0.010359679,
-0.02451109,
0.03226403,
0.01353021,
-0.029357241,
-0.07104932,
0.0121810455,
-0.010132696
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 6,
"total_tokens": 6
}
}
},
"is_streaming": false
}
}


@ -22,7 +22,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.625862Z",
+        "created_at": "2025-10-01T01:38:20.882299989Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -40,7 +40,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.668885Z",
+        "created_at": "2025-10-01T01:38:21.078187004Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -58,7 +58,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.710947Z",
+        "created_at": "2025-10-01T01:38:21.272715034Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -76,7 +76,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.752286Z",
+        "created_at": "2025-10-01T01:38:21.469070891Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -94,7 +94,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.793309Z",
+        "created_at": "2025-10-01T01:38:21.673266264Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -112,7 +112,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.834578Z",
+        "created_at": "2025-10-01T01:38:21.873306711Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -130,7 +130,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.876536Z",
+        "created_at": "2025-10-01T01:38:22.070968284Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -148,7 +148,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.918807Z",
+        "created_at": "2025-10-01T01:38:22.269036335Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -166,7 +166,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:32.960101Z",
+        "created_at": "2025-10-01T01:38:22.465488517Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -184,7 +184,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:33.00196Z",
+        "created_at": "2025-10-01T01:38:22.658421677Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -202,7 +202,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:33.043876Z",
+        "created_at": "2025-10-01T01:38:22.852187817Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -220,7 +220,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:33.08756Z",
+        "created_at": "2025-10-01T01:38:23.049518191Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -238,15 +238,15 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:42:33.12966Z",
+        "created_at": "2025-10-01T01:38:23.248955312Z",
         "done": true,
         "done_reason": "stop",
-        "total_duration": 648814958,
-        "load_duration": 75300875,
+        "total_duration": 4434138141,
+        "load_duration": 43018186,
         "prompt_eval_count": 408,
-        "prompt_eval_duration": 66740291,
+        "prompt_eval_duration": 2022594115,
         "eval_count": 13,
-        "eval_duration": 505313125,
+        "eval_duration": 2367937192,
         "response": "",
         "thinking": null,
         "context": null


@ -22,7 +22,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:51.805591Z",
+        "created_at": "2025-10-01T01:34:19.167396532Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -40,7 +40,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:51.850067Z",
+        "created_at": "2025-10-01T01:34:19.362195218Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -58,7 +58,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:51.892443Z",
+        "created_at": "2025-10-01T01:34:19.556896355Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -76,7 +76,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:51.934364Z",
+        "created_at": "2025-10-01T01:34:19.752258848Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -94,7 +94,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:51.978382Z",
+        "created_at": "2025-10-01T01:34:19.949688527Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -112,7 +112,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.019332Z",
+        "created_at": "2025-10-01T01:34:20.145337065Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -130,7 +130,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.060708Z",
+        "created_at": "2025-10-01T01:34:20.340739605Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -148,7 +148,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.102717Z",
+        "created_at": "2025-10-01T01:34:20.539146761Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -166,7 +166,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.143996Z",
+        "created_at": "2025-10-01T01:34:20.73590849Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -184,7 +184,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.185479Z",
+        "created_at": "2025-10-01T01:34:20.930252877Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -202,7 +202,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.227562Z",
+        "created_at": "2025-10-01T01:34:21.124432932Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -220,7 +220,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.270178Z",
+        "created_at": "2025-10-01T01:34:21.332871735Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -238,7 +238,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.31151Z",
+        "created_at": "2025-10-01T01:34:21.52851911Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -256,7 +256,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.35278Z",
+        "created_at": "2025-10-01T01:34:21.724649778Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -274,7 +274,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.393954Z",
+        "created_at": "2025-10-01T01:34:21.922353561Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -292,7 +292,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.435238Z",
+        "created_at": "2025-10-01T01:34:22.117061137Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -310,7 +310,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.476197Z",
+        "created_at": "2025-10-01T01:34:22.31230442Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -328,7 +328,7 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.517914Z",
+        "created_at": "2025-10-01T01:34:22.506582272Z",
         "done": false,
         "done_reason": null,
         "total_duration": null,
@ -346,15 +346,15 @@
       "__type__": "ollama._types.GenerateResponse",
       "__data__": {
         "model": "llama3.2:3b-instruct-fp16",
-        "created_at": "2025-09-03T17:37:52.55904Z",
+        "created_at": "2025-10-01T01:34:22.702819703Z",
         "done": true,
         "done_reason": "stop",
-        "total_duration": 971882292,
-        "load_duration": 116634209,
+        "total_duration": 6447413112,
+        "load_duration": 45664730,
         "prompt_eval_count": 376,
-        "prompt_eval_duration": 99382958,
+        "prompt_eval_duration": 2864046437,
         "eval_count": 19,
-        "eval_duration": 755260750,
+        "eval_duration": 3537012183,
         "response": "",
         "thinking": null,
         "context": null


@ -1,221 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.\nYou MUST use one of the provided functions/tools to answer the user query.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.465701Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "[",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.507671Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "get",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.549443Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_weather",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.590803Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "(location",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.631683Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "=\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.672443Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "San",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.713329Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Francisco",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.754254Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.795119Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " CA",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.836145Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\")]",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.877784Z",
"done": true,
"done_reason": "stop",
"total_duration": 612057417,
"load_duration": 97443583,
"prompt_eval_count": 341,
"prompt_eval_duration": 100914750,
"eval_count": 11,
"eval_duration": 413024250,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:19.594923Z",
"done": true,
"done_reason": "stop",
"total_duration": 988472417,
"load_duration": 117976625,
"prompt_eval_count": 326,
"prompt_eval_duration": 451625542,
"eval_count": 11,
"eval_duration": 418313417,
"response": "[get_weather(location=\"San Francisco, CA\")]",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}


@ -0,0 +1,101 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/chat/completions",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"messages": [
{
"role": "user",
"content": "what's the current time? You MUST call the `get_current_time` function to find out."
}
],
"response_format": {
"type": "text"
},
"stream": true,
"tools": [
{
"type": "function",
"function": {
"type": "function",
"name": "get_current_time",
"description": "Get the current time",
"parameters": {},
"strict": null
}
}
]
},
"endpoint": "/v1/chat/completions",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-188",
"choices": [
{
"delta": {
"content": "",
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": [
{
"index": 0,
"id": "call_bij0w4gk",
"function": {
"arguments": "{}",
"name": "get_current_time"
},
"type": "function"
}
]
},
"finish_reason": null,
"index": 0,
"logprobs": null
}
],
"created": 1759253831,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": null
}
},
{
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": {
"id": "chatcmpl-188",
"choices": [
{
"delta": {
"content": "",
"function_call": null,
"refusal": null,
"role": "assistant",
"tool_calls": null
},
"finish_reason": "tool_calls",
"index": 0,
"logprobs": null
}
],
"created": 1759253831,
"model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk",
"service_tier": null,
"system_fingerprint": "fp_ollama",
"usage": null
}
}
],
"is_streaming": true
}
}


@ -1,221 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant. You have access to functions, but you should only use them if they are required.\nYou are an expert in composing functions. You are given a question and a set of possible functions.\nBased on the question, you may or may not need to make one function/tool call to achieve the purpose.\n\nIf you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]\nIf you decide to invoke a function, you SHOULD NOT include any other text in the response. besides the function call in the above format.\nFor a boolean parameter, be sure to use `True` or `False` (capitalized) for the value.\n\n\nHere is a list of functions in JSON format that you can invoke.\n\n[\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather\",\n \"parameters\": {\n \"type\": \"dict\",\n \"required\": [\"location\"],\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The city and state (both required), e.g. San Francisco, CA.\"\n }\n }\n }\n }\n]\n\nYou can answer general questions or invoke tools when necessary.\nIn addition to tool calls, you should also augment your responses by using the tool outputs.\nPretend you are a weather assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat's the weather like in San Francisco, CA?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:19.808372Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "[",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:19.84991Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "get",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:19.892111Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "_weather",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:19.933857Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "(location",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:19.975148Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "=\"",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.016641Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "San",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.058229Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Francisco",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.100222Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.143456Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " CA",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.184657Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "\")]",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:20.226017Z",
"done": true,
"done_reason": "stop",
"total_duration": 598395375,
"load_duration": 129432167,
"prompt_eval_count": 326,
"prompt_eval_duration": 50057334,
"eval_count": 11,
"eval_duration": 418284791,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -20,15 +20,15 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama-guard3:1b", "model": "llama-guard3:1b",
"created_at": "2025-09-03T17:38:00.98692Z", "created_at": "2025-09-30T17:39:20.866577556Z",
"done": true, "done": true,
"done_reason": "stop", "done_reason": "stop",
"total_duration": 332473583, "total_duration": 4350589762,
"load_duration": 90611333, "load_duration": 53782244,
"prompt_eval_count": 317, "prompt_eval_count": 317,
"prompt_eval_duration": 229691000, "prompt_eval_duration": 4243686737,
"eval_count": 2, "eval_count": 2,
"eval_duration": 11571291, "eval_duration": 52523173,
"response": "safe", "response": "safe",
"thinking": null, "thinking": null,
"context": null "context": null


@ -21,7 +21,7 @@
"body": { "body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion", "__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": { "__data__": {
"id": "chatcmpl-738", "id": "chatcmpl-819",
"choices": [ "choices": [
{ {
"finish_reason": "stop", "finish_reason": "stop",
@ -38,7 +38,7 @@
} }
} }
], ],
"created": 1759245079, "created": 1759282466,
"model": "llama-guard3:1b", "model": "llama-guard3:1b",
"object": "chat.completion", "object": "chat.completion",
"service_tier": null, "service_tier": null,


@ -1,383 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the name of the US captial?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.402486Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "The",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.444334Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " capital",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.484625Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " of",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.525063Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " the",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.565015Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " United",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.60499Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " States",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.64509Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " is",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.685566Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Washington",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.725855Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.766056Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " D",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.806415Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ".C",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.847273Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ".",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.888576Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " (",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.928952Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "short",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.969744Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " for",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:18.010869Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " District",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:18.051109Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " of",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:18.093266Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " Columbia",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:18.135749Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ").",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:18.176649Z",
"done": true,
"done_reason": "stop",
"total_duration": 907420000,
"load_duration": 66756750,
"prompt_eval_count": 26,
"prompt_eval_duration": 62900875,
"eval_count": 20,
"eval_duration": 777306958,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -22,7 +22,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.549266Z", "created_at": "2025-10-01T01:36:25.060343636Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -40,7 +40,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.592203Z", "created_at": "2025-10-01T01:36:25.261200569Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -58,7 +58,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.63417Z", "created_at": "2025-10-01T01:36:25.462791752Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -76,7 +76,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.677268Z", "created_at": "2025-10-01T01:36:25.660954264Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -94,7 +94,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.719768Z", "created_at": "2025-10-01T01:36:25.857710285Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -112,7 +112,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.762204Z", "created_at": "2025-10-01T01:36:26.055796043Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -130,7 +130,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.80404Z", "created_at": "2025-10-01T01:36:26.256947843Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -148,7 +148,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.845678Z", "created_at": "2025-10-01T01:36:26.454224889Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -166,7 +166,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.887086Z", "created_at": "2025-10-01T01:36:26.663146208Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -184,7 +184,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.928422Z", "created_at": "2025-10-01T01:36:26.878266227Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -202,7 +202,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:03.969641Z", "created_at": "2025-10-01T01:36:27.086618766Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -220,7 +220,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:04.011212Z", "created_at": "2025-10-01T01:36:27.28577576Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -238,15 +238,15 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:04.052626Z", "created_at": "2025-10-01T01:36:27.484586207Z",
"done": true, "done": true,
"done_reason": "stop", "done_reason": "stop",
"total_duration": 731936583, "total_duration": 4491434092,
"load_duration": 147334791, "load_duration": 44110434,
"prompt_eval_count": 417, "prompt_eval_count": 417,
"prompt_eval_duration": 79443792, "prompt_eval_duration": 2021505668,
"eval_count": 13, "eval_count": 13,
"eval_duration": 504352750, "eval_duration": 2425224707,
"response": "", "response": "",
"thinking": null, "thinking": null,
"context": null "context": null


@ -22,7 +22,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:01.89965Z", "created_at": "2025-10-01T01:36:11.873171882Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -40,7 +40,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:01.941253Z", "created_at": "2025-10-01T01:36:12.073738984Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -58,7 +58,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:01.982621Z", "created_at": "2025-10-01T01:36:12.272476639Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -76,7 +76,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.024144Z", "created_at": "2025-10-01T01:36:12.469220325Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -94,7 +94,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.065495Z", "created_at": "2025-10-01T01:36:12.665965955Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -112,7 +112,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.107529Z", "created_at": "2025-10-01T01:36:12.860442987Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -130,7 +130,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.149217Z", "created_at": "2025-10-01T01:36:13.055440385Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -148,7 +148,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.190357Z", "created_at": "2025-10-01T01:36:13.25612888Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -166,7 +166,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.231501Z", "created_at": "2025-10-01T01:36:13.454322876Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -184,7 +184,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.272546Z", "created_at": "2025-10-01T01:36:13.651445403Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -202,7 +202,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.313561Z", "created_at": "2025-10-01T01:36:13.851107226Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -220,7 +220,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.354563Z", "created_at": "2025-10-01T01:36:14.048095911Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -238,7 +238,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.395585Z", "created_at": "2025-10-01T01:36:14.250994986Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -256,7 +256,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.436854Z", "created_at": "2025-10-01T01:36:14.454971706Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -274,7 +274,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.47814Z", "created_at": "2025-10-01T01:36:14.654349738Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -292,7 +292,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.519661Z", "created_at": "2025-10-01T01:36:14.851507509Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -310,7 +310,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.561119Z", "created_at": "2025-10-01T01:36:15.044987002Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -328,7 +328,7 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.602821Z", "created_at": "2025-10-01T01:36:15.246563515Z",
"done": false, "done": false,
"done_reason": null, "done_reason": null,
"total_duration": null, "total_duration": null,
@ -346,15 +346,15 @@
"__type__": "ollama._types.GenerateResponse", "__type__": "ollama._types.GenerateResponse",
"__data__": { "__data__": {
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:38:02.644633Z", "created_at": "2025-10-01T01:36:15.447689838Z",
"done": true, "done": true,
"done_reason": "stop", "done_reason": "stop",
"total_duration": 1375629459, "total_duration": 35945660492,
"load_duration": 94090250, "load_duration": 42881569,
"prompt_eval_count": 386, "prompt_eval_count": 386,
"prompt_eval_duration": 535119167, "prompt_eval_duration": 32326727198,
"eval_count": 19, "eval_count": 19,
"eval_duration": 745684041, "eval_duration": 3575452190,
"response": "", "response": "",
"thinking": null, "thinking": null,
"context": null "context": null

File diff suppressed because it is too large


@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhich planet has rings around it with a name starting with letter S?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:42:17.227488Z",
"done": true,
"done_reason": "stop",
"total_duration": 3003964916,
"load_duration": 111221916,
"prompt_eval_count": 30,
"prompt_eval_duration": 72578583,
"eval_count": 70,
"eval_duration": 2819555375,
"response": "The answer is Saturn! Saturn's ring system is one of the most iconic and well-known in our solar system. The rings are made up of ice particles, rock debris, and dust that orbit around the planet due to its gravitational pull.\n\nWould you like to know more about Saturn's rings or is there something else I can help you with?",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}


@ -1,203 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nGive me a sentence that contains the word: hello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": true
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": [
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.267146Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "Hello",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.309006Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": ",",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.351179Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " how",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.393262Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " can",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.436079Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " I",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.478393Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " assist",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.520608Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " you",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.562885Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": " today",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.604683Z",
"done": false,
"done_reason": null,
"total_duration": null,
"load_duration": null,
"prompt_eval_count": null,
"prompt_eval_duration": null,
"eval_count": null,
"eval_duration": null,
"response": "?",
"thinking": null,
"context": null
}
},
{
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-18T19:47:58.646586Z",
"done": true,
"done_reason": "stop",
"total_duration": 1011323917,
"load_duration": 76575458,
"prompt_eval_count": 31,
"prompt_eval_duration": 553259250,
"eval_count": 10,
"eval_duration": 380302792,
"response": "",
"thinking": null,
"context": null
}
}
],
"is_streaming": true
}
}


@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the smallest country in the world?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:39:54.374714Z",
"done": true,
"done_reason": "stop",
"total_duration": 6321793333,
"load_duration": 182255958,
"prompt_eval_count": 25,
"prompt_eval_duration": 67964459,
"eval_count": 150,
"eval_duration": 6070867875,
"response": "The smallest country in the world is the Vatican City, which has a total area of approximately 0.44 km\u00b2 (0.17 sq mi). It is an independent city-state located within Rome, Italy, and is home to the Pope and the central government of the Catholic Church.\n\nTo put that into perspective, the Vatican City is smaller than a golf course! Despite its tiny size, it has its own government, currency, postal system, and even its own police force. It's also home to numerous iconic landmarks like St. Peter's Basilica and the Sistine Chapel.\n\nInterestingly, the Vatican City is not only the smallest country in the world but also the most densely populated, with a population of just over 800 people!",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}


@ -41,7 +41,7 @@
{ {
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": { "__data__": {
"id": "chatcmpl-116", "id": "chatcmpl-72",
"choices": [ "choices": [
{ {
"delta": { "delta": {
@ -52,7 +52,7 @@
"tool_calls": [ "tool_calls": [
{ {
"index": 0, "index": 0,
"id": "call_0c2qffvv", "id": "call_aone7ocw",
"function": { "function": {
"arguments": "{\"city\":\"Tokyo\"}", "arguments": "{\"city\":\"Tokyo\"}",
"name": "get_weather" "name": "get_weather"
@ -66,7 +66,7 @@
"logprobs": null "logprobs": null
} }
], ],
"created": 1759267492, "created": 1759282724,
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk", "object": "chat.completion.chunk",
"service_tier": null, "service_tier": null,
@ -77,7 +77,7 @@
{ {
"__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk", "__type__": "openai.types.chat.chat_completion_chunk.ChatCompletionChunk",
"__data__": { "__data__": {
"id": "chatcmpl-116", "id": "chatcmpl-72",
"choices": [ "choices": [
{ {
"delta": { "delta": {
@ -87,12 +87,12 @@
"role": "assistant", "role": "assistant",
"tool_calls": null "tool_calls": null
}, },
"finish_reason": "stop", "finish_reason": "tool_calls",
"index": 0, "index": 0,
"logprobs": null "logprobs": null
} }
], ],
"created": 1759267492, "created": 1759282724,
"model": "llama3.2:3b-instruct-fp16", "model": "llama3.2:3b-instruct-fp16",
"object": "chat.completion.chunk", "object": "chat.completion.chunk",
"service_tier": null, "service_tier": null,


@ -1,64 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"prompt": "<|begin_of_text|>Michael Jordan was born in 1963. He played basketball for the Chicago Bulls. He retired in 2003.Please respond in JSON format with the schema: {\"properties\": {\"name\": {\"title\": \"Name\", \"type\": \"string\"}, \"year_born\": {\"title\": \"Year Born\", \"type\": \"string\"}, \"year_retired\": {\"title\": \"Year Retired\", \"type\": \"string\"}}, \"required\": [\"name\", \"year_born\", \"year_retired\"], \"title\": \"AnswerFormat\", \"type\": \"object\"}",
"raw": true,
"format": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"year_born": {
"title": "Year Born",
"type": "string"
},
"year_retired": {
"title": "Year Retired",
"type": "string"
}
},
"required": [
"name",
"year_born",
"year_retired"
],
"title": "AnswerFormat",
"type": "object"
},
"options": {
"temperature": 0.0,
"max_tokens": 50,
"num_predict": 50
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-09-03T17:36:17.508028Z",
"done": true,
"done_reason": "stop",
"total_duration": 1529591917,
"load_duration": 84990667,
"prompt_eval_count": 119,
"prompt_eval_duration": 189045583,
"eval_count": 29,
"eval_duration": 1254813583,
"response": "{ \"name\": \"Michael Jordan\", \"year_born\": \"1963\", \"year_retired\": \"2003\"}\n ",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}


@ -21,7 +21,7 @@
"body": { "body": {
"__type__": "openai.types.chat.chat_completion.ChatCompletion", "__type__": "openai.types.chat.chat_completion.ChatCompletion",
"__data__": { "__data__": {
"id": "chatcmpl-236", "id": "chatcmpl-737",
"choices": [ "choices": [
{ {
"finish_reason": "stop", "finish_reason": "stop",
@ -38,7 +38,7 @@
} }
} }
], ],
"created": 1759247859, "created": 1759282582,
"model": "llama-guard3:1b", "model": "llama-guard3:1b",
"object": "chat.completion", "object": "chat.completion",
"service_tier": null, "service_tier": null,


@@ -22,7 +22,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:48.840898Z",
+"created_at": "2025-10-01T01:33:52.93635761Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -40,7 +40,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:48.883619Z",
+"created_at": "2025-10-01T01:33:53.133195005Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -58,7 +58,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:48.92504Z",
+"created_at": "2025-10-01T01:33:53.332277092Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -76,7 +76,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:48.966274Z",
+"created_at": "2025-10-01T01:33:53.529012616Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -94,7 +94,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.007525Z",
+"created_at": "2025-10-01T01:33:53.724651797Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -112,7 +112,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.049125Z",
+"created_at": "2025-10-01T01:33:53.923248219Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -130,7 +130,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.090893Z",
+"created_at": "2025-10-01T01:33:54.117881107Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -148,7 +148,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.132101Z",
+"created_at": "2025-10-01T01:33:54.311986552Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -166,7 +166,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.17401Z",
+"created_at": "2025-10-01T01:33:54.505749874Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -184,7 +184,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.216115Z",
+"created_at": "2025-10-01T01:33:54.699245098Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -202,7 +202,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.257109Z",
+"created_at": "2025-10-01T01:33:54.890029079Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -220,7 +220,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.298731Z",
+"created_at": "2025-10-01T01:33:55.081182058Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -238,7 +238,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.338833Z",
+"created_at": "2025-10-01T01:33:55.27115012Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -256,7 +256,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.38053Z",
+"created_at": "2025-10-01T01:33:55.46403171Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -274,7 +274,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.421378Z",
+"created_at": "2025-10-01T01:33:55.655042212Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -292,7 +292,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.462646Z",
+"created_at": "2025-10-01T01:33:55.844320935Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -310,7 +310,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.503814Z",
+"created_at": "2025-10-01T01:33:56.035465828Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -328,7 +328,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.545397Z",
+"created_at": "2025-10-01T01:33:56.240155299Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -346,15 +346,15 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:49.586834Z",
+"created_at": "2025-10-01T01:33:56.432393304Z",
 "done": true,
 "done_reason": "stop",
-"total_duration": 1409239209,
-"load_duration": 118889250,
+"total_duration": 34185152900,
+"load_duration": 44303323,
 "prompt_eval_count": 368,
-"prompt_eval_duration": 543077166,
+"prompt_eval_duration": 30642631331,
 "eval_count": 19,
-"eval_duration": 746733584,
+"eval_duration": 3497664639,
 "response": "",
 "thinking": null,
 "context": null

View file

@@ -1,39 +0,0 @@
{
"request": {
"method": "POST",
"url": "http://localhost:11434/api/generate",
"headers": {},
"body": {
"model": "llama3.2:3b-instruct-fp16",
"raw": true,
"prompt": "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nTest metrics generation 0<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n",
"options": {
"temperature": 0.0
},
"stream": false
},
"endpoint": "/api/generate",
"model": "llama3.2:3b-instruct-fp16"
},
"response": {
"body": {
"__type__": "ollama._types.GenerateResponse",
"__data__": {
"model": "llama3.2:3b-instruct-fp16",
"created_at": "2025-08-11T15:51:12.918723Z",
"done": true,
"done_reason": "stop",
"total_duration": 8868987792,
"load_duration": 2793275292,
"prompt_eval_count": 21,
"prompt_eval_duration": 250000000,
"eval_count": 344,
"eval_duration": 5823000000,
"response": "Here are some common test metrics used to evaluate the performance of a system:\n\n1. **Accuracy**: The proportion of correct predictions or classifications out of total predictions made.\n2. **Precision**: The ratio of true positives (correctly predicted instances) to the sum of true positives and false positives (incorrectly predicted instances).\n3. **Recall**: The ratio of true positives to the sum of true positives and false negatives (missed instances).\n4. **F1-score**: The harmonic mean of precision and recall, providing a balanced measure of both.\n5. **Mean Squared Error (MSE)**: The average squared difference between predicted and actual values.\n6. **Mean Absolute Error (MAE)**: The average absolute difference between predicted and actual values.\n7. **Root Mean Squared Percentage Error (RMSPE)**: The square root of the mean of the squared percentage differences between predicted and actual values.\n8. **Coefficient of Determination (R-squared, R2)**: Measures how well a model fits the data, with higher values indicating better fit.\n9. **Mean Absolute Percentage Error (MAPE)**: The average absolute percentage difference between predicted and actual values.\n10. **Normalized Mean Squared Error (NMSE)**: Similar to MSE, but normalized by the mean of the actual values.\n\nThese metrics can be used for various types of data, including:\n\n* Regression problems (e.g., predicting continuous values)\n* Classification problems (e.g., predicting categorical labels)\n* Time series forecasting\n* Clustering and dimensionality reduction\n\nWhen choosing a metric, consider the specific problem you're trying to solve, the type of data, and the desired level of precision.",
"thinking": null,
"context": null
}
},
"is_streaming": false
}
}

View file

@@ -22,7 +22,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.708948Z",
+"created_at": "2025-10-01T01:33:10.76700718Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -40,7 +40,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.749031Z",
+"created_at": "2025-10-01T01:33:10.956949035Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -58,7 +58,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.790192Z",
+"created_at": "2025-10-01T01:33:11.147886127Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -76,7 +76,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.831093Z",
+"created_at": "2025-10-01T01:33:11.337832912Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -94,7 +94,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.873135Z",
+"created_at": "2025-10-01T01:33:11.524017554Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -112,7 +112,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.91375Z",
+"created_at": "2025-10-01T01:33:11.712703934Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -130,7 +130,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.95439Z",
+"created_at": "2025-10-01T01:33:11.903877596Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -148,7 +148,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:46.995224Z",
+"created_at": "2025-10-01T01:33:12.095535165Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -166,7 +166,7 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:47.035887Z",
+"created_at": "2025-10-01T01:33:12.291614477Z",
 "done": false,
 "done_reason": null,
 "total_duration": null,
@@ -184,15 +184,15 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama3.2:3b-instruct-fp16",
-"created_at": "2025-09-03T17:37:47.076806Z",
+"created_at": "2025-10-01T01:33:12.483844314Z",
 "done": true,
 "done_reason": "stop",
-"total_duration": 2069654958,
-"load_duration": 177579833,
+"total_duration": 4303509972,
+"load_duration": 44748689,
 "prompt_eval_count": 31,
-"prompt_eval_duration": 1521851250,
+"prompt_eval_duration": 2539513749,
 "eval_count": 10,
-"eval_duration": 369478042,
+"eval_duration": 1718623697,
 "response": "",
 "thinking": null,
 "context": null

View file

@@ -20,15 +20,15 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama-guard3:1b",
-"created_at": "2025-09-03T17:37:51.562847Z",
+"created_at": "2025-09-30T17:38:19.258171865Z",
 "done": true,
 "done_reason": "stop",
-"total_duration": 272296250,
-"load_duration": 131747125,
+"total_duration": 2789705003,
+"load_duration": 60163509,
 "prompt_eval_count": 214,
-"prompt_eval_duration": 124006709,
+"prompt_eval_duration": 2677292181,
 "eval_count": 2,
-"eval_duration": 15572291,
+"eval_duration": 51690110,
 "response": "safe",
 "thinking": null,
 "context": null

View file

@@ -20,15 +20,15 @@
 "__type__": "ollama._types.GenerateResponse",
 "__data__": {
 "model": "llama-guard3:1b",
-"created_at": "2025-09-03T17:37:47.871962Z",
+"created_at": "2025-09-30T17:37:27.310573231Z",
 "done": true,
 "done_reason": "stop",
-"total_duration": 301629042,
-"load_duration": 102832917,
+"total_duration": 3251121805,
+"load_duration": 47089617,
 "prompt_eval_count": 233,
-"prompt_eval_duration": 154806625,
+"prompt_eval_duration": 3006835928,
 "eval_count": 5,
-"eval_duration": 43361542,
+"eval_duration": 196620033,
 "response": "unsafe\nS1",
 "thinking": null,
 "context": null

View file

@@ -21,7 +21,7 @@
 "body": {
 "__type__": "openai.types.chat.chat_completion.ChatCompletion",
 "__data__": {
-"id": "chatcmpl-344",
+"id": "chatcmpl-165",
 "choices": [
 {
 "finish_reason": "stop",
@@ -38,7 +38,7 @@
 }
 }
 ],
-"created": 1759247858,
+"created": 1759282579,
 "model": "llama-guard3:1b",
 "object": "chat.completion",
 "service_tier": null,

View file

@@ -21,7 +21,7 @@
 "body": {
 "__type__": "openai.types.chat.chat_completion.ChatCompletion",
 "__data__": {
-"id": "chatcmpl-119",
+"id": "chatcmpl-609",
 "choices": [
 {
 "finish_reason": "stop",
@@ -38,7 +38,7 @@
 }
 }
 ],
-"created": 1759245069,
+"created": 1759282388,
 "model": "llama-guard3:1b",
 "object": "chat.completion",
 "service_tier": null,

View file

@@ -0,0 +1,806 @@
{
"request": {
"method": "POST",
"url": "http://0.0.0.0:11434/v1/v1/embeddings",
"headers": {},
"body": {
"model": "nomic-embed-text:137m-v1.5-fp16",
"input": [
"Why are data structures important?"
],
"encoding_format": "float"
},
"endpoint": "/v1/embeddings",
"model": "nomic-embed-text:137m-v1.5-fp16"
},
"response": {
"body": {
"__type__": "openai.types.create_embedding_response.CreateEmbeddingResponse",
"__data__": {
"data": [
{
"embedding": [
-0.0055067283,
0.0691788,
-0.12835562,
-0.054449122,
0.056506466,
0.008154408,
0.016579939,
-0.005861886,
-0.053147435,
-0.06689316,
-0.0125774965,
0.012131817,
0.10522907,
-0.022567436,
-0.010184469,
0.0047555137,
-0.09560516,
-0.02869415,
0.005823712,
0.026181953,
-0.050526746,
-0.019493021,
0.012390013,
0.014383491,
0.026209505,
0.061908394,
0.03508825,
-0.06008353,
-0.024454756,
0.060678,
0.06708033,
-0.0022188132,
0.034376595,
-0.03279394,
-0.06730504,
-0.07369063,
-0.037954886,
0.041736037,
-0.0022857673,
-0.036154196,
-0.0043730233,
0.02660196,
-0.043143313,
-0.016130125,
0.056613196,
0.0035527975,
-0.017358474,
-0.06225926,
0.063272394,
-0.025721373,
0.045175213,
-0.033949595,
0.009468214,
0.0092460355,
0.08431274,
0.01425319,
0.011694144,
0.031544022,
0.034130182,
-0.076243795,
0.068438105,
0.11499481,
-0.059728492,
0.02415792,
0.008430943,
-0.04239523,
-0.045541644,
0.0042671585,
-0.022412328,
-0.016552199,
0.038433194,
0.035031006,
0.01044125,
-0.035626266,
-0.018012544,
0.019699976,
-0.0018288917,
0.032518297,
-0.0177986,
0.042808123,
0.022334872,
-0.014575339,
0.051781073,
-0.026092554,
0.006079152,
0.02757349,
0.019296495,
-0.00514512,
0.00082866545,
0.06785129,
0.018279642,
-0.054320488,
0.03349167,
0.048226908,
-0.07671358,
0.028916309,
-0.0010493343,
0.02221549,
0.016000975,
0.01223793,
-0.017005093,
-0.033222955,
-0.0055971234,
0.03769521,
-0.008500556,
-0.0026479687,
0.018203754,
0.040224712,
-0.021299101,
-0.019668331,
-0.011704243,
0.07116387,
-0.03220624,
0.0041646096,
-0.012268384,
-0.007227694,
0.057473723,
-0.07691696,
-0.06090154,
-0.032882772,
-0.024933215,
-0.030841816,
0.063512295,
0.050505444,
-0.009545097,
-0.019137407,
-0.014251317,
0.035820402,
0.025301578,
-0.032520078,
-0.023825355,
-0.02894602,
-0.072710305,
0.003224811,
0.02377651,
0.027730972,
-0.07713202,
-0.0330053,
0.05449727,
0.044401404,
-0.006475545,
0.047970258,
-0.057762735,
-0.033274963,
0.018484,
-0.004733799,
0.048722517,
-0.015905516,
-0.012622708,
-0.04765113,
0.013506974,
0.044848952,
-0.0065122605,
0.0021293245,
0.0020283123,
-0.018023405,
0.025206288,
-0.021057727,
0.01721119,
0.029168243,
0.07257681,
0.022936262,
-0.011233473,
0.015861422,
-0.019733926,
-0.05565718,
0.026574634,
-0.007964335,
-0.00105196,
0.012244276,
-0.010458468,
0.00025068677,
0.029596092,
-0.02004873,
0.03952663,
-0.036656335,
0.016609907,
-0.050120637,
0.11185912,
-0.050909996,
-0.048775107,
-0.020030547,
0.0153389415,
0.0011901723,
-0.038483646,
0.02004873,
0.017939426,
-0.017415283,
-0.03634165,
-0.02609482,
0.021946523,
0.02326441,
-0.052063353,
-0.0030024708,
-0.008184734,
-0.011170216,
-0.008318481,
0.040304467,
0.019288791,
7.0962094e-05,
-0.047486935,
-0.019311698,
-0.04947344,
0.026369695,
-0.057666145,
0.034645956,
-0.050079547,
0.035380702,
-0.015542651,
-0.024575872,
0.07835102,
-0.025289344,
0.005440495,
0.015665129,
-0.01966988,
-0.07520282,
-0.02425893,
-0.047322523,
-0.020614233,
0.038350448,
-0.026481356,
-0.040539965,
0.0661944,
0.02502757,
-0.010155566,
-0.035468638,
-0.01562628,
-0.04135564,
-0.031548798,
-0.049242284,
-0.04551279,
-0.036385354,
0.035608906,
0.021134995,
0.018818628,
0.043228216,
0.042133935,
-0.015709238,
0.06552171,
-0.0044355174,
0.0021416203,
0.021100294,
-0.009039295,
0.00014870724,
0.040932197,
0.017849974,
-0.019864114,
-0.047478165,
-0.05676394,
0.049951475,
-0.048136313,
-0.017876703,
0.012142189,
0.02373712,
0.0334763,
-0.035479926,
-0.012235951,
-0.030320909,
0.021752922,
0.03523251,
0.04498809,
-0.03067527,
-0.020974364,
-0.046126693,
-0.03995082,
0.012467275,
0.022052003,
-0.018320043,
0.0013203244,
-0.004935072,
0.0050206785,
-0.0047598844,
0.011211644,
0.039831202,
0.027249418,
0.014987716,
-0.01940106,
-0.009642856,
-0.07113845,
0.054759383,
-0.018858217,
-0.024562797,
-0.08670976,
-0.004677105,
-9.054924e-05,
0.051185664,
0.01569594,
0.053627595,
0.0003285345,
0.027126677,
0.033433437,
0.033166908,
-0.023327576,
0.060068127,
0.08517537,
-0.039610267,
0.028960181,
0.027604481,
0.0029389325,
-0.076566145,
-0.0273395,
0.08770552,
0.05686777,
0.01246495,
-0.016718954,
0.010576854,
0.018693427,
-0.026167914,
-0.0641247,
0.00813129,
-0.008773337,
-0.010244281,
0.0024596818,
0.027441284,
-0.03914519,
0.03687808,
0.0073220856,
0.02342061,
0.0123781385,
-0.0035178016,
0.0015435648,
-0.029216826,
-0.031155663,
-0.073616505,
0.009858675,
0.06776608,
-0.015782345,
0.023255533,
-0.014765486,
-0.019421978,
0.050556473,
-0.03567379,
0.015625134,
-0.027594624,
-0.07591481,
0.025782052,
-0.0038178826,
-0.011459214,
-0.015950324,
0.0015048053,
-0.016965888,
-0.025626767,
-0.009411103,
-0.043649834,
0.010833025,
0.029808043,
-0.036940675,
-0.040114816,
0.034165625,
-0.014691349,
-0.059829887,
0.016475074,
-0.018302068,
0.00890752,
-0.018081741,
0.015727276,
0.017466683,
0.011933743,
-0.028065827,
0.0052258503,
0.0062493044,
0.0044333255,
-0.011237428,
-0.0069862586,
-0.033975184,
0.023760261,
-0.015055696,
0.0039600013,
0.020392103,
0.024047762,
-0.02872406,
0.007738409,
-0.01555987,
0.03011806,
0.040093675,
-0.0033892216,
-0.06931259,
-0.019519035,
-0.008750149,
0.04236017,
0.059455607,
-0.007929568,
-0.008857907,
-0.041450884,
0.029837137,
-0.0729099,
0.005836722,
-0.004100339,
-0.0029754906,
0.01634229,
-0.029647883,
-0.050842095,
-0.029163536,
0.009248952,
-0.0028640334,
-0.052900236,
-0.05512097,
0.055659927,
0.04992974,
-0.004757618,
-0.036179878,
-0.07280319,
-0.03567622,
-0.044285037,
-0.008555347,
0.04550832,
-0.00094304525,
-0.0656589,
-0.030906383,
-0.023528634,
0.004441927,
0.025694514,
0.0041591898,
-0.035672203,
-0.02444802,
0.013817473,
0.01189618,
0.0062793735,
0.0036719819,
0.014963965,
0.053757705,
0.06549391,
0.042496137,
0.010899155,
0.043035947,
0.032150052,
0.09407309,
0.024764558,
-0.011964197,
-0.048119746,
0.008351835,
0.06145398,
0.019204808,
-0.0030630424,
-0.06240826,
0.03536538,
0.018408166,
0.06362795,
-0.07275413,
0.068704925,
0.014603027,
-0.06760976,
-0.0031986972,
0.010279434,
0.03215372,
0.06905764,
-0.023212021,
-0.022716299,
-0.072324574,
0.08606839,
0.012951449,
0.021978272,
0.031508896,
-0.0057483097,
0.09630234,
-0.0063684364,
-0.012098242,
-0.03970645,
0.028056627,
0.087799124,
-0.03352194,
-0.016433993,
-0.046286825,
0.016221909,
0.009365449,
-0.053078208,
0.0009465837,
-0.048553433,
0.04233797,
0.042736158,
-0.022603348,
0.027159866,
0.0115378685,
-0.04380032,
0.0344026,
0.0620608,
-0.04509567,
-0.025683708,
0.052748833,
0.045589417,
-0.02661964,
-0.011906934,
-0.022709992,
-0.021741541,
0.030429155,
0.025474131,
-0.03997484,
-0.01695355,
0.039500427,
0.0066278055,
0.017997347,
-0.010868054,
0.034119062,
0.0492591,
-0.025168648,
-0.03258354,
0.017921297,
0.002936628,
-0.016890781,
-0.01574124,
0.0097997,
0.0144984145,
-0.0050222855,
-0.03178876,
-0.010070219,
0.0038994572,
0.082671225,
-0.064686015,
-0.0023998383,
-0.0709133,
-0.012587475,
0.004713978,
-0.008365287,
0.04570752,
0.019821582,
-0.045601755,
0.005780342,
0.023135826,
-0.03841521,
-0.014287952,
-0.040951498,
0.001222165,
-0.0015837784,
0.008921765,
-0.021013433,
0.029224606,
0.018224735,
-0.038594235,
-0.0011877345,
0.03056137,
0.045560293,
0.03386976,
-0.08028984,
-0.02174568,
0.010873439,
-0.02909561,
-0.028367657,
0.06934649,
0.03567452,
0.045095395,
0.017239548,
0.025105212,
-0.047474947,
0.027460333,
0.01906143,
-0.059046946,
0.011000827,
-0.030548505,
-0.00993384,
-0.047402643,
-0.03227493,
0.01925817,
-0.024694432,
-0.017810628,
-0.0051988256,
-0.046833005,
0.011399863,
-0.009450567,
-0.013994235,
-0.029993635,
0.03204231,
0.055144217,
0.02970146,
0.05029242,
0.04417347,
0.019293677,
0.011820924,
0.021562446,
0.025712157,
0.026714647,
0.015479491,
-0.029627334,
0.013564938,
0.022211872,
0.0008475917,
0.02283723,
-0.0019577122,
-0.028588077,
-0.032387972,
-0.047514796,
0.016408252,
-0.024263887,
0.04294992,
0.0058976035,
0.04238604,
-0.0014817569,
-0.008880384,
-0.01518041,
0.039314184,
-0.034863494,
-0.031348925,
0.02491094,
0.023272267,
-0.01213154,
-0.0029186436,
0.009363544,
-0.020474007,
0.022881426,
0.011876272,
-0.099849775,
0.04103065,
0.036249414,
0.018814126,
0.011653004,
0.01733942,
0.038440976,
0.031077309,
-0.023530783,
-0.060318835,
-0.01800236,
0.040951062,
-0.015199813,
-0.048856284,
0.007818538,
0.0192296,
-0.046680138,
4.1682793e-05,
-0.01107478,
0.033890743,
-0.036434487,
0.013583908,
-0.056057207,
0.015355855,
-0.0056020026,
0.027543671,
0.006491281,
-0.062176593,
-0.0027985624,
0.0154205365,
0.05427184,
-0.042704068,
0.08902915,
-0.0867114,
0.011701053,
-0.031208558,
0.0035119688,
0.020856252,
0.029149834,
-0.013294537,
0.006884604,
-0.004071396,
-0.016199552,
0.0140966065,
0.034344625,
0.044646475,
-0.014534568,
0.06434988,
0.057418663,
0.054409288,
-0.032788362,
0.025831478,
0.053699754,
0.01104724,
-0.013593943,
0.021206772,
-0.057033155,
0.002879689,
-0.02299407,
-0.025942653,
-0.01795699,
-0.0005103142,
0.009943925,
-0.0111974655,
-0.043488014,
0.02352647,
-0.00085910445,
0.036153458,
0.008397858,
-0.0125623,
0.045501575,
0.017022615,
0.02164789,
0.044366788,
-0.05922759,
0.06606177,
0.032538608,
0.015617672,
-0.05665216,
-0.048967004,
-0.008281686,
0.03639404,
0.013526518,
0.048029386,
-0.0032675986,
-0.02734557,
0.034290742,
-0.010661151,
-0.044663135,
-0.010002009,
-0.023236647,
-0.009099468,
-0.050651174,
-0.01877344,
-0.057528064,
-0.006980231,
0.020679744,
0.00032431784,
0.004773796,
0.0069069746,
0.016760433,
0.008305804,
-0.028032228,
0.024984887,
0.015810564,
0.028754044,
0.013413702,
0.04405434,
0.006831175,
-0.013154476,
0.025184985,
0.020763578,
-0.027210625,
0.047467683,
0.012808554,
0.019128239,
-0.006344172,
-0.0012825177,
-0.04123715,
-0.070471205,
0.026458906,
0.011127495,
-0.053800732,
-0.042026933,
0.014701638,
-0.009170802,
0.010387788,
0.014916444,
0.0058068377,
0.014975564,
0.0056835464,
-0.049073413,
-0.022337116,
-0.021429205,
0.011414711,
-0.059687294,
0.026811803,
-0.033584774,
0.03430464,
-0.061727095,
-0.002469326,
-0.025580805,
0.042926375,
-0.022121925,
0.0075072222,
-0.025951052,
-0.032126367,
-0.016206766,
0.05476613,
0.027255341,
0.017624483,
-0.053568747,
-0.009815464,
-0.021195231,
0.01143239,
-0.055088513,
0.05115604,
-0.020695584,
0.016151866,
0.09019919,
0.035570264,
0.027598873,
0.0329581,
0.051568285,
0.030362109,
-0.009580888,
-0.0100544235,
-0.024147386,
0.0180904
],
"index": 0,
"object": "embedding"
}
],
"model": "nomic-embed-text:137m-v1.5-fp16",
"object": "list",
"usage": {
"prompt_tokens": 6,
"total_tokens": 6
}
}
},
"is_streaming": false
}
}

Some files were not shown because too many files have changed in this diff