Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 04:04:14 +00:00)

Commit 1222657626: Merge remote-tracking branch 'origin/main' into migrate-eval-to-openai

31 changed files with 247 additions and 720 deletions
.github/workflows/conformance.yml (vendored, 2 changes)

@@ -43,7 +43,7 @@ jobs:
       # Cache oasdiff to avoid checksum failures and speed up builds
       - name: Cache oasdiff
         id: cache-oasdiff
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809
+        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830
         with:
           path: ~/oasdiff
           key: oasdiff-${{ runner.os }}
docs/static/llama-stack-spec.html (vendored, 120 changes)

@@ -1035,50 +1035,6 @@
                ]
            }
        },
-        "/v1/inference/embeddings": {
-            "post": {
-                "responses": {
-                    "200": {
-                        "description": "An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}.",
-                        "content": {
-                            "application/json": {
-                                "schema": {
-                                    "$ref": "#/components/schemas/EmbeddingsResponse"
-                                }
-                            }
-                        }
-                    },
-                    "400": {
-                        "$ref": "#/components/responses/BadRequest400"
-                    },
-                    "429": {
-                        "$ref": "#/components/responses/TooManyRequests429"
-                    },
-                    "500": {
-                        "$ref": "#/components/responses/InternalServerError500"
-                    },
-                    "default": {
-                        "$ref": "#/components/responses/DefaultError"
-                    }
-                },
-                "tags": [
-                    "Inference"
-                ],
-                "summary": "Generate embeddings for content pieces using the specified model.",
-                "description": "Generate embeddings for content pieces using the specified model.",
-                "parameters": [],
-                "requestBody": {
-                    "content": {
-                        "application/json": {
-                            "schema": {
-                                "$ref": "#/components/schemas/EmbeddingsRequest"
-                            }
-                        }
-                    },
-                    "required": true
-                }
-            }
-        },
        "/v1alpha/eval/benchmarks/{benchmark_id}/evaluations": {
            "post": {
                "responses": {

@@ -5475,7 +5431,7 @@
                }
            }
        },
-        "/v1/inference/rerank": {
+        "/v1alpha/inference/rerank": {
            "post": {
                "responses": {
                    "200": {

@@ -10547,80 +10503,6 @@
                "title": "OpenAIDeleteResponseObject",
                "description": "Response object confirming deletion of an OpenAI response."
            },
-            "EmbeddingsRequest": {
-                "type": "object",
-                "properties": {
-                    "model_id": {
-                        "type": "string",
-                        "description": "The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint."
-                    },
-                    "contents": {
-                        "oneOf": [
-                            {
-                                "type": "array",
-                                "items": {
-                                    "type": "string"
-                                }
-                            },
-                            {
-                                "type": "array",
-                                "items": {
-                                    "$ref": "#/components/schemas/InterleavedContentItem"
-                                }
-                            }
-                        ],
-                        "description": "List of contents to generate embeddings for. Each content can be a string or an InterleavedContentItem (and hence can be multimodal). The behavior depends on the model and provider. Some models may only support text."
-                    },
-                    "text_truncation": {
-                        "type": "string",
-                        "enum": [
-                            "none",
-                            "start",
-                            "end"
-                        ],
-                        "description": "(Optional) Config for how to truncate text for embedding when text is longer than the model's max sequence length."
-                    },
-                    "output_dimension": {
-                        "type": "integer",
-                        "description": "(Optional) Output dimensionality for the embeddings. Only supported by Matryoshka models."
-                    },
-                    "task_type": {
-                        "type": "string",
-                        "enum": [
-                            "query",
-                            "document"
-                        ],
-                        "description": "(Optional) How is the embedding being used? This is only supported by asymmetric embedding models."
-                    }
-                },
-                "additionalProperties": false,
-                "required": [
-                    "model_id",
-                    "contents"
-                ],
-                "title": "EmbeddingsRequest"
-            },
-            "EmbeddingsResponse": {
-                "type": "object",
-                "properties": {
-                    "embeddings": {
-                        "type": "array",
-                        "items": {
-                            "type": "array",
-                            "items": {
-                                "type": "number"
-                            }
-                        },
-                        "description": "List of embedding vectors, one per input content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}"
-                    }
-                },
-                "additionalProperties": false,
-                "required": [
-                    "embeddings"
-                ],
-                "title": "EmbeddingsResponse",
-                "description": "Response containing generated embeddings."
-            },
            "AgentCandidate": {
                "type": "object",
                "properties": {
docs/static/llama-stack-spec.yaml (vendored, 103 changes)

@@ -720,41 +720,6 @@ paths:
        required: true
        schema:
          type: string
-  /v1/inference/embeddings:
-    post:
-      responses:
-        '200':
-          description: >-
-            An array of embeddings, one for each content. Each embedding is a list
-            of floats. The dimensionality of the embedding is model-specific; you
-            can check model metadata using /models/{model_id}.
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/EmbeddingsResponse'
-        '400':
-          $ref: '#/components/responses/BadRequest400'
-        '429':
-          $ref: >-
-            #/components/responses/TooManyRequests429
-        '500':
-          $ref: >-
-            #/components/responses/InternalServerError500
-        default:
-          $ref: '#/components/responses/DefaultError'
-      tags:
-        - Inference
-      summary: >-
-        Generate embeddings for content pieces using the specified model.
-      description: >-
-        Generate embeddings for content pieces using the specified model.
-      parameters: []
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/EmbeddingsRequest'
-        required: true
  /v1alpha/eval/benchmarks/{benchmark_id}/evaluations:
    post:
      responses:

@@ -3930,7 +3895,7 @@ paths:
            schema:
              $ref: '#/components/schemas/QueryTracesRequest'
        required: true
-  /v1/inference/rerank:
+  /v1alpha/inference/rerank:
    post:
      responses:
        '200':

@@ -7795,72 +7760,6 @@ components:
      title: OpenAIDeleteResponseObject
      description: >-
        Response object confirming deletion of an OpenAI response.
-    EmbeddingsRequest:
-      type: object
-      properties:
-        model_id:
-          type: string
-          description: >-
-            The identifier of the model to use. The model must be an embedding model
-            registered with Llama Stack and available via the /models endpoint.
-        contents:
-          oneOf:
-            - type: array
-              items:
-                type: string
-            - type: array
-              items:
-                $ref: '#/components/schemas/InterleavedContentItem'
-          description: >-
-            List of contents to generate embeddings for. Each content can be a string
-            or an InterleavedContentItem (and hence can be multimodal). The behavior
-            depends on the model and provider. Some models may only support text.
-        text_truncation:
-          type: string
-          enum:
-            - none
-            - start
-            - end
-          description: >-
-            (Optional) Config for how to truncate text for embedding when text is
-            longer than the model's max sequence length.
-        output_dimension:
-          type: integer
-          description: >-
-            (Optional) Output dimensionality for the embeddings. Only supported by
-            Matryoshka models.
-        task_type:
-          type: string
-          enum:
-            - query
-            - document
-          description: >-
-            (Optional) How is the embedding being used? This is only supported by
-            asymmetric embedding models.
-      additionalProperties: false
-      required:
-        - model_id
-        - contents
-      title: EmbeddingsRequest
-    EmbeddingsResponse:
-      type: object
-      properties:
-        embeddings:
-          type: array
-          items:
-            type: array
-            items:
-              type: number
-          description: >-
-            List of embedding vectors, one per input content. Each embedding is a
-            list of floats. The dimensionality of the embedding is model-specific;
-            you can check model metadata using /models/{model_id}
-      additionalProperties: false
-      required:
-        - embeddings
-      title: EmbeddingsResponse
-      description: >-
-        Response containing generated embeddings.
    AgentCandidate:
      type: object
      properties:
@@ -17,11 +17,11 @@ from typing import (
 from pydantic import BaseModel, Field, field_validator
 from typing_extensions import TypedDict
 
-from llama_stack.apis.common.content_types import ContentDelta, InterleavedContent, InterleavedContentItem
+from llama_stack.apis.common.content_types import ContentDelta, InterleavedContent
 from llama_stack.apis.common.responses import Order
 from llama_stack.apis.models import Model
 from llama_stack.apis.telemetry import MetricResponseMixin
-from llama_stack.apis.version import LLAMA_STACK_API_V1
+from llama_stack.apis.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1ALPHA
 from llama_stack.models.llama.datatypes import (
     BuiltinTool,
     StopReason,

@@ -1070,27 +1070,7 @@ class InferenceProvider(Protocol):
         """
         ...
 
-    @webmethod(route="/inference/embeddings", method="POST", level=LLAMA_STACK_API_V1)
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        """Generate embeddings for content pieces using the specified model.
-
-        :param model_id: The identifier of the model to use. The model must be an embedding model registered with Llama Stack and available via the /models endpoint.
-        :param contents: List of contents to generate embeddings for. Each content can be a string or an InterleavedContentItem (and hence can be multimodal). The behavior depends on the model and provider. Some models may only support text.
-        :param output_dimension: (Optional) Output dimensionality for the embeddings. Only supported by Matryoshka models.
-        :param text_truncation: (Optional) Config for how to truncate text for embedding when text is longer than the model's max sequence length.
-        :param task_type: (Optional) How is the embedding being used? This is only supported by asymmetric embedding models.
-        :returns: An array of embeddings, one for each content. Each embedding is a list of floats. The dimensionality of the embedding is model-specific; you can check model metadata using /models/{model_id}.
-        """
-        ...
-
-    @webmethod(route="/inference/rerank", method="POST", experimental=True, level=LLAMA_STACK_API_V1)
+    @webmethod(route="/inference/rerank", method="POST", level=LLAMA_STACK_API_V1ALPHA)
     async def rerank(
         self,
         model: str,
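
Note: this hunk drops the embeddings method from the InferenceProvider protocol and moves the rerank route from the stable v1 level to v1alpha. The adapters below keep exposing openai_embeddings, so embeddings requests go through the OpenAI-compatible surface instead. A minimal client-side sketch using the openai Python package; the base URL and model name are assumptions for illustration, not values taken from this diff:

    # Sketch: request embeddings via the OpenAI-compatible endpoint instead of
    # the removed /v1/inference/embeddings route. Adjust base_url to your deployment.
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:8321/v1",  # assumed Llama Stack server address
        api_key="none",
    )
    resp = client.embeddings.create(
        model="all-MiniLM-L6-v2",  # any embedding model registered with the stack
        input=["first content piece", "second content piece"],
    )
    vectors = [item.embedding for item in resp.data]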
@@ -433,6 +433,12 @@ class InferenceStoreConfig(BaseModel):
     num_writers: int = Field(default=4, description="Number of concurrent background writers")
 
 
+class ResponsesStoreConfig(BaseModel):
+    sql_store_config: SqlStoreConfig
+    max_write_queue_size: int = Field(default=10000, description="Max queued writes for responses store")
+    num_writers: int = Field(default=4, description="Number of concurrent background writers")
+
+
 class StackRunConfig(BaseModel):
     version: int = LLAMA_STACK_RUN_CONFIG_VERSION
 
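
Note: ResponsesStoreConfig mirrors InferenceStoreConfig, pairing a nested SQL store config with queue-tuning fields. A minimal construction sketch; the field names come from the hunk above, while the import paths and db_path value are assumptions inferred from the relative imports elsewhere in this diff:

    # Hypothetical construction of the new config; defaults shown explicitly.
    from llama_stack.core.datatypes import ResponsesStoreConfig
    from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig  # assumed path

    config = ResponsesStoreConfig(
        sql_store_config=SqliteSqlStoreConfig(db_path="/tmp/responses.db"),
        max_write_queue_size=10000,  # default from the Field() definition
        num_writers=4,               # default from the Field() definition
    )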
@@ -29,6 +29,7 @@ from llama_stack.apis.telemetry import Telemetry
 from llama_stack.apis.tools import ToolGroups, ToolRuntime
 from llama_stack.apis.vector_dbs import VectorDBs
 from llama_stack.apis.vector_io import VectorIO
+from llama_stack.apis.version import LLAMA_STACK_API_V1ALPHA
 from llama_stack.core.client import get_client_impl
 from llama_stack.core.datatypes import (
     AccessRule,

@@ -412,8 +413,14 @@ def check_protocol_compliance(obj: Any, protocol: Any) -> None:
 
     mro = type(obj).__mro__
     for name, value in inspect.getmembers(protocol):
-        if inspect.isfunction(value) and hasattr(value, "__webmethod__"):
-            if value.__webmethod__.experimental:
+        if inspect.isfunction(value) and hasattr(value, "__webmethods__"):
+            has_alpha_api = False
+            for webmethod in value.__webmethods__:
+                if webmethod.level == LLAMA_STACK_API_V1ALPHA:
+                    has_alpha_api = True
+                    break
+            # if this API has multiple webmethods, and one of them is an alpha API, this API should be skipped when checking for missing or not callable routes
+            if has_alpha_api:
                 continue
             if not hasattr(obj, name):
                 missing_methods.append((name, "missing"))
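
Note: the compliance check now reads a __webmethods__ list rather than a single __webmethod__, and skips any protocol method that carries at least one v1alpha route. The skip rule in isolation, as a hedged sketch with stand-in objects (the real attribute is attached by the @webmethod decorator):

    # Illustrative restatement only; FakeWebMethod is a placeholder, not the real type.
    from dataclasses import dataclass

    LLAMA_STACK_API_V1ALPHA = "v1alpha"

    @dataclass
    class FakeWebMethod:
        route: str
        level: str

    webmethods = [FakeWebMethod(route="/inference/rerank", level=LLAMA_STACK_API_V1ALPHA)]
    has_alpha_api = any(wm.level == LLAMA_STACK_API_V1ALPHA for wm in webmethods)
    if has_alpha_api:
        # alpha-only APIs are not reported as missing or non-callable routes
        print("skipping compliance check for this method")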
@@ -16,7 +16,6 @@ from pydantic import Field, TypeAdapter
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.common.errors import ModelNotFoundError, ModelTypeError
 from llama_stack.apis.inference import (

@@ -26,8 +25,6 @@ from llama_stack.apis.inference import (
     CompletionMessage,
     CompletionResponse,
     CompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     ListOpenAIChatCompletionResponse,
     LogProbConfig,

@@ -48,7 +45,6 @@ from llama_stack.apis.inference import (
     ResponseFormat,
     SamplingParams,
     StopReason,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -312,25 +308,6 @@ class InferenceRouter(Inference):
 
         return response
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        logger.debug(f"InferenceRouter.embeddings: {model_id}")
-        await self._get_model(model_id, ModelType.embedding)
-        provider = await self.routing_table.get_provider_impl(model_id)
-        return await provider.embeddings(
-            model_id=model_id,
-            contents=contents,
-            text_truncation=text_truncation,
-            output_dimension=output_dimension,
-            task_type=task_type,
-        )
-
     async def openai_completion(
         self,
         model: str,
@@ -924,7 +924,7 @@ async def get_raw_document_text(document: Document) -> str:
             DeprecationWarning,
             stacklevel=2,
         )
-    elif not (document.mime_type.startswith("text/") or document.mime_type == "application/yaml"):
+    elif not (document.mime_type.startswith("text/") or document.mime_type in ("application/yaml", "application/json")):
         raise ValueError(f"Unexpected document mime type: {document.mime_type}")
 
     if isinstance(document.content, URL):
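
Note: the document loader now treats application/json as raw text alongside text/* and application/yaml. Restated as a small predicate for clarity (illustrative only, not the function's actual shape):

    def is_supported_text_mime(mime_type: str) -> bool:
        # Mirrors the updated condition in get_raw_document_text: text/*, YAML, and now
        # JSON are accepted; anything else raises "Unexpected document mime type".
        return mime_type.startswith("text/") or mime_type in ("application/yaml", "application/json")

    assert is_supported_text_mime("application/json")
    assert not is_supported_text_mime("application/pdf")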
@@ -11,21 +11,17 @@ from botocore.client import BaseClient
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,
     OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -47,8 +43,6 @@ from llama_stack.providers.utils.inference.openai_compat import (
 )
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
-    content_has_media,
-    interleaved_content_as_str,
 )
 
 from .models import MODEL_ENTRIES

@@ -218,36 +212,6 @@ class BedrockInferenceAdapter(
             ),
         }
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self.model_store.get_model(model_id)
-
-        # Convert foundation model ID to inference profile ID
-        region_name = self.client.meta.region_name
-        inference_profile_id = _to_inference_profile_id(model.provider_resource_id, region_name)
-
-        embeddings = []
-        for content in contents:
-            assert not content_has_media(content), "Bedrock does not support media for embeddings"
-            input_text = interleaved_content_as_str(content)
-            input_body = {"inputText": input_text}
-            body = json.dumps(input_body)
-            response = self.client.invoke_model(
-                body=body,
-                modelId=inference_profile_id,
-                accept="application/json",
-                contentType="application/json",
-            )
-            response_body = json.loads(response.get("body").read())
-            embeddings.append(response_body.get("embedding"))
-        return EmbeddingsResponse(embeddings=embeddings)
-
     async def openai_embeddings(
         self,
         model: str,
@@ -11,21 +11,17 @@ from cerebras.cloud.sdk import AsyncCerebras
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     CompletionRequest,
     CompletionResponse,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,
     OpenAIEmbeddingsResponse,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -187,16 +183,6 @@ class CerebrasInferenceAdapter(
             **get_sampling_options(request.sampling_params),
         }
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        raise NotImplementedError()
-
     async def openai_embeddings(
         self,
         model: str,
@@ -11,15 +11,12 @@ from databricks.sdk import WorkspaceClient
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
     CompletionResponse,
     CompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,

@@ -27,7 +24,6 @@ from llama_stack.apis.inference import (
     OpenAICompletion,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -118,16 +114,6 @@ class DatabricksInferenceAdapter(
     ) -> ChatCompletionResponse | AsyncIterator[ChatCompletionResponseStreamChunk]:
         raise NotImplementedError()
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        raise NotImplementedError()
-
     async def list_models(self) -> list[Model] | None:
         self._model_cache = {}  # from OpenAIMixin
         ws_client = WorkspaceClient(host=self.config.url, token=self.get_api_key())  # TODO: this is not async
@@ -10,22 +10,18 @@ from fireworks.client import Fireworks
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     CompletionRequest,
     CompletionResponse,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -48,8 +44,6 @@ from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
     completion_request_to_prompt,
-    content_has_media,
-    interleaved_content_as_str,
     request_has_media,
 )
 

@@ -259,28 +253,3 @@ class FireworksInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Nee
         logger.debug(f"params to fireworks: {params}")
 
         return params
-
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self.model_store.get_model(model_id)
-
-        kwargs = {}
-        if model.metadata.get("embedding_dimension"):
-            kwargs["dimensions"] = model.metadata.get("embedding_dimension")
-        assert all(not content_has_media(content) for content in contents), (
-            "Fireworks does not support media for embeddings"
-        )
-        response = self._get_client().embeddings.create(
-            model=model.provider_resource_id,
-            input=[interleaved_content_as_str(content) for content in contents],
-            **kwargs,
-        )
-
-        embeddings = [data.embedding for data in response.data]
-        return EmbeddingsResponse(embeddings=embeddings)
@@ -11,8 +11,6 @@ from openai import NOT_GIVEN, APIConnectionError
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
-    TextContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,

@@ -21,8 +19,6 @@ from llama_stack.apis.inference import (
     CompletionRequest,
     CompletionResponse,
     CompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,

@@ -31,7 +27,6 @@ from llama_stack.apis.inference import (
     OpenAIEmbeddingUsage,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
 )

@@ -156,60 +151,6 @@ class NVIDIAInferenceAdapter(OpenAIMixin, Inference):
         # we pass n=1 to get only one completion
         return convert_openai_completion_choice(response.choices[0])
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        if any(content_has_media(content) for content in contents):
-            raise NotImplementedError("Media is not supported")
-
-        #
-        # Llama Stack: contents = list[str] | list[InterleavedContentItem]
-        #  ->
-        # OpenAI: input = str | list[str]
-        #
-        # we can ignore str and always pass list[str] to OpenAI
-        #
-        flat_contents = [content.text if isinstance(content, TextContentItem) else content for content in contents]
-        input = [content.text if isinstance(content, TextContentItem) else content for content in flat_contents]
-        provider_model_id = await self._get_provider_model_id(model_id)
-
-        extra_body = {}
-
-        if text_truncation is not None:
-            text_truncation_options = {
-                TextTruncation.none: "NONE",
-                TextTruncation.end: "END",
-                TextTruncation.start: "START",
-            }
-            extra_body["truncate"] = text_truncation_options[text_truncation]
-
-        if output_dimension is not None:
-            extra_body["dimensions"] = output_dimension
-
-        if task_type is not None:
-            task_type_options = {
-                EmbeddingTaskType.document: "passage",
-                EmbeddingTaskType.query: "query",
-            }
-            extra_body["input_type"] = task_type_options[task_type]
-
-        response = await self.client.embeddings.create(
-            model=provider_model_id,
-            input=input,
-            extra_body=extra_body,
-        )
-        #
-        # OpenAI: CreateEmbeddingResponse(data=[Embedding(embedding=list[float], ...)], ...)
-        # ->
-        # Llama Stack: EmbeddingsResponse(embeddings=list[list[float]])
-        #
-        return EmbeddingsResponse(embeddings=[embedding.embedding for embedding in response.data])
-
     async def openai_embeddings(
         self,
         model: str,
@@ -14,7 +14,6 @@ from ollama import AsyncClient as AsyncOllamaClient
 from llama_stack.apis.common.content_types import (
     ImageContentItem,
     InterleavedContent,
-    InterleavedContentItem,
     TextContentItem,
 )
 from llama_stack.apis.common.errors import UnsupportedModelError

@@ -25,8 +24,6 @@ from llama_stack.apis.inference import (
     CompletionRequest,
     CompletionResponse,
     CompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     GrammarResponseFormat,
     InferenceProvider,
     JsonSchemaResponseFormat,

@@ -34,7 +31,6 @@ from llama_stack.apis.inference import (
     Message,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -66,9 +62,7 @@ from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
     completion_request_to_prompt,
-    content_has_media,
     convert_image_content_to_url,
-    interleaved_content_as_str,
     request_has_media,
 )
 

@@ -363,27 +357,6 @@ class OllamaInferenceAdapter(
         async for chunk in process_chat_completion_stream_response(stream, request):
             yield chunk
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self._get_model(model_id)
-
-        assert all(not content_has_media(content) for content in contents), (
-            "Ollama does not support media for embeddings"
-        )
-        response = await self.ollama_client.embed(
-            model=model.provider_resource_id,
-            input=[interleaved_content_as_str(content) for content in contents],
-        )
-        embeddings = response["embeddings"]
-
-        return EmbeddingsResponse(embeddings=embeddings)
-
     async def register_model(self, model: Model) -> Model:
         if await self.check_model_availability(model.provider_model_id):
             return model
@@ -14,8 +14,6 @@ from llama_stack.apis.inference import (
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
     CompletionMessage,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,

@@ -27,7 +25,6 @@ from llama_stack.apis.inference import (
     OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -190,25 +187,6 @@ class PassthroughInferenceAdapter(Inference):
             chunk = convert_to_pydantic(ChatCompletionResponseStreamChunk, chunk)
             yield chunk
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[InterleavedContent],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        client = self._get_client()
-        model = await self.model_store.get_model(model_id)
-
-        return await client.inference.embeddings(
-            model_id=model.provider_resource_id,
-            contents=contents,
-            text_truncation=text_truncation,
-            output_dimension=output_dimension,
-            task_type=task_type,
-        )
-
     async def openai_embeddings(
         self,
         model: str,
@@ -136,16 +136,6 @@ class RunpodInferenceAdapter(
             **get_sampling_options(request.sampling_params),
         }
 
-    async def embeddings(
-        self,
-        model: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        raise NotImplementedError()
-
     async def openai_embeddings(
         self,
         model: str,
@@ -12,14 +12,11 @@ from pydantic import SecretStr
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     CompletionRequest,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,

@@ -27,7 +24,6 @@ from llama_stack.apis.inference import (
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -306,16 +302,6 @@ class _HfAdapter(
             **self._build_options(request.sampling_params, request.response_format),
         )
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        raise NotImplementedError()
-
     async def openai_embeddings(
         self,
         model: str,
@@ -12,14 +12,11 @@ from together.constants import BASE_URL
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     CompletionRequest,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     Inference,
     LogProbConfig,
     Message,

@@ -27,7 +24,6 @@ from llama_stack.apis.inference import (
     ResponseFormat,
     ResponseFormatType,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -50,8 +46,6 @@ from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     chat_completion_request_to_prompt,
     completion_request_to_prompt,
-    content_has_media,
-    interleaved_content_as_str,
     request_has_media,
 )
 

@@ -247,26 +241,6 @@ class TogetherInferenceAdapter(OpenAIMixin, ModelRegistryHelper, Inference, Need
         logger.debug(f"params to together: {params}")
         return params
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self.model_store.get_model(model_id)
-        assert all(not content_has_media(content) for content in contents), (
-            "Together does not support media for embeddings"
-        )
-        client = self._get_client()
-        r = await client.embeddings.create(
-            model=model.provider_resource_id,
-            input=[interleaved_content_as_str(content) for content in contents],
-        )
-        embeddings = [item.embedding for item in r.data]
-        return EmbeddingsResponse(embeddings=embeddings)
-
     async def list_models(self) -> list[Model] | None:
         self._model_cache = {}
         # Together's /v1/models is not compatible with OpenAI's /v1/models. Together support ticket #13355 -> will not fix, use Together's own client
@@ -16,7 +16,6 @@ from openai.types.chat.chat_completion_chunk import (
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
     TextDelta,
     ToolCallDelta,
     ToolCallParseStatus,

@@ -31,8 +30,6 @@ from llama_stack.apis.inference import (
     CompletionRequest,
     CompletionResponse,
     CompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     GrammarResponseFormat,
     Inference,
     JsonSchemaResponseFormat,

@@ -41,7 +38,6 @@ from llama_stack.apis.inference import (
     ModelStore,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -74,8 +70,6 @@ from llama_stack.providers.utils.inference.openai_compat import (
 from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin
 from llama_stack.providers.utils.inference.prompt_adapter import (
     completion_request_to_prompt,
-    content_has_media,
-    interleaved_content_as_str,
     request_has_media,
 )
 

@@ -550,27 +544,3 @@ class VLLMInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin, Inference, ModelsPro
             "stream": request.stream,
             **options,
         }
-
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self._get_model(model_id)
-
-        kwargs = {}
-        assert model.model_type == ModelType.embedding
-        assert model.metadata.get("embedding_dimension")
-        kwargs["dimensions"] = model.metadata.get("embedding_dimension")
-        assert all(not content_has_media(content) for content in contents), "VLLM does not support media for embeddings"
-        response = await self.client.embeddings.create(
-            model=model.provider_resource_id,
-            input=[interleaved_content_as_str(content) for content in contents],
-            **kwargs,
-        )
-
-        embeddings = [data.embedding for data in response.data]
-        return EmbeddingsResponse(embeddings=embeddings)
@@ -11,13 +11,11 @@ from ibm_watsonx_ai.foundation_models import Model
 from ibm_watsonx_ai.metanames import GenTextParamsMetaNames as GenParams
 from openai import AsyncOpenAI
 
-from llama_stack.apis.common.content_types import InterleavedContent, InterleavedContentItem
+from llama_stack.apis.common.content_types import InterleavedContent
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     CompletionRequest,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     GreedySamplingStrategy,
     Inference,
     LogProbConfig,

@@ -30,7 +28,6 @@ from llama_stack.apis.inference import (
     OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -265,16 +262,6 @@ class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
         }
         return params
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        raise NotImplementedError("embedding is not supported for watsonx")
-
     async def openai_embeddings(
         self,
         model: str,
@@ -15,16 +15,11 @@ if TYPE_CHECKING:
     from sentence_transformers import SentenceTransformer
 
 from llama_stack.apis.inference import (
-    EmbeddingsResponse,
-    EmbeddingTaskType,
-    InterleavedContentItem,
     ModelStore,
     OpenAIEmbeddingData,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
-    TextTruncation,
 )
-from llama_stack.providers.utils.inference.prompt_adapter import interleaved_content_as_str
 
 EMBEDDING_MODELS = {}
 

@@ -35,23 +30,6 @@ log = get_logger(name=__name__, category="providers::utils")
 class SentenceTransformerEmbeddingMixin:
     model_store: ModelStore
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self.model_store.get_model(model_id)
-        embedding_model = await self._load_sentence_transformer_model(model.provider_resource_id)
-        embeddings = await asyncio.to_thread(
-            embedding_model.encode,
-            [interleaved_content_as_str(content) for content in contents],
-            show_progress_bar=False,
-        )
-        return EmbeddingsResponse(embeddings=embeddings)
-
     async def openai_embeddings(
         self,
         model: str,
@@ -11,14 +11,11 @@ import litellm
 
 from llama_stack.apis.common.content_types import (
     InterleavedContent,
-    InterleavedContentItem,
 )
 from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
-    EmbeddingsResponse,
-    EmbeddingTaskType,
     InferenceProvider,
     JsonSchemaResponseFormat,
     LogProbConfig,

@@ -32,7 +29,6 @@ from llama_stack.apis.inference import (
     OpenAIResponseFormatParam,
     ResponseFormat,
     SamplingParams,
-    TextTruncation,
     ToolChoice,
     ToolConfig,
     ToolDefinition,

@@ -50,9 +46,6 @@ from llama_stack.providers.utils.inference.openai_compat import (
     get_sampling_options,
     prepare_openai_completion_params,
 )
-from llama_stack.providers.utils.inference.prompt_adapter import (
-    interleaved_content_as_str,
-)
 
 logger = get_logger(name=__name__, category="providers::utils")
 

@@ -269,24 +262,6 @@ class LiteLLMOpenAIMixin(
         )
         return api_key
 
-    async def embeddings(
-        self,
-        model_id: str,
-        contents: list[str] | list[InterleavedContentItem],
-        text_truncation: TextTruncation | None = TextTruncation.none,
-        output_dimension: int | None = None,
-        task_type: EmbeddingTaskType | None = None,
-    ) -> EmbeddingsResponse:
-        model = await self.model_store.get_model(model_id)
-
-        response = litellm.embedding(
-            model=self.get_litellm_model_name(model.provider_resource_id),
-            input=[interleaved_content_as_str(content) for content in contents],
-        )
-
-        embeddings = [data["embedding"] for data in response["data"]]
-        return EmbeddingsResponse(embeddings=embeddings)
-
     async def openai_embeddings(
         self,
         model: str,
@@ -3,6 +3,9 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
+import asyncio
+from typing import Any
+
 from llama_stack.apis.agents import (
     Order,
 )
@@ -14,24 +17,51 @@ from llama_stack.apis.agents.openai_responses import (
     OpenAIResponseObject,
     OpenAIResponseObjectWithInput,
 )
-from llama_stack.core.datatypes import AccessRule
+from llama_stack.core.datatypes import AccessRule, ResponsesStoreConfig
 from llama_stack.core.utils.config_dirs import RUNTIME_BASE_DIR
+from llama_stack.log import get_logger
 
 from ..sqlstore.api import ColumnDefinition, ColumnType
 from ..sqlstore.authorized_sqlstore import AuthorizedSqlStore
-from ..sqlstore.sqlstore import SqliteSqlStoreConfig, SqlStoreConfig, sqlstore_impl
+from ..sqlstore.sqlstore import SqliteSqlStoreConfig, SqlStoreConfig, SqlStoreType, sqlstore_impl
 
+logger = get_logger(name=__name__, category="responses_store")
 
 
 class ResponsesStore:
-    def __init__(self, sql_store_config: SqlStoreConfig, policy: list[AccessRule]):
-        if not sql_store_config:
-            sql_store_config = SqliteSqlStoreConfig(
+    def __init__(
+        self,
+        config: ResponsesStoreConfig | SqlStoreConfig,
+        policy: list[AccessRule],
+    ):
+        # Handle backward compatibility
+        if not isinstance(config, ResponsesStoreConfig):
+            # Legacy: SqlStoreConfig passed directly as config
+            config = ResponsesStoreConfig(
+                sql_store_config=config,
+            )
+
+        self.config = config
+        self.sql_store_config = config.sql_store_config
+        if not self.sql_store_config:
+            self.sql_store_config = SqliteSqlStoreConfig(
                 db_path=(RUNTIME_BASE_DIR / "sqlstore.db").as_posix(),
             )
-        self.sql_store = AuthorizedSqlStore(sqlstore_impl(sql_store_config), policy)
+        self.sql_store = None
+        self.policy = policy
+
+        # Disable write queue for SQLite to avoid concurrency issues
+        self.enable_write_queue = self.sql_store_config.type != SqlStoreType.sqlite
+
+        # Async write queue and worker control
+        self._queue: asyncio.Queue[tuple[OpenAIResponseObject, list[OpenAIResponseInput]]] | None = None
+        self._worker_tasks: list[asyncio.Task[Any]] = []
+        self._max_write_queue_size: int = config.max_write_queue_size
+        self._num_writers: int = max(1, config.num_writers)
 
     async def initialize(self):
         """Create the necessary tables if they don't exist."""
+        self.sql_store = AuthorizedSqlStore(sqlstore_impl(self.sql_store_config), self.policy)
         await self.sql_store.create_table(
             "openai_responses",
             {
@@ -42,9 +72,68 @@ class ResponsesStore:
             },
         )
 
+        if self.enable_write_queue:
+            self._queue = asyncio.Queue(maxsize=self._max_write_queue_size)
+            for _ in range(self._num_writers):
+                self._worker_tasks.append(asyncio.create_task(self._worker_loop()))
+        else:
+            logger.info("Write queue disabled for SQLite to avoid concurrency issues")
+
+    async def shutdown(self) -> None:
+        if not self._worker_tasks:
+            return
+        if self._queue is not None:
+            await self._queue.join()
+        for t in self._worker_tasks:
+            if not t.done():
+                t.cancel()
+        for t in self._worker_tasks:
+            try:
+                await t
+            except asyncio.CancelledError:
+                pass
+        self._worker_tasks.clear()
+
+    async def flush(self) -> None:
+        """Wait for all queued writes to complete. Useful for testing."""
+        if self.enable_write_queue and self._queue is not None:
+            await self._queue.join()
+
     async def store_response_object(
         self, response_object: OpenAIResponseObject, input: list[OpenAIResponseInput]
     ) -> None:
+        if self.enable_write_queue:
+            if self._queue is None:
+                raise ValueError("Responses store is not initialized")
+            try:
+                self._queue.put_nowait((response_object, input))
+            except asyncio.QueueFull:
+                logger.warning(f"Write queue full; adding response id={getattr(response_object, 'id', '<unknown>')}")
+                await self._queue.put((response_object, input))
+        else:
+            await self._write_response_object(response_object, input)
+
+    async def _worker_loop(self) -> None:
+        assert self._queue is not None
+        while True:
+            try:
+                item = await self._queue.get()
+            except asyncio.CancelledError:
+                break
+            response_object, input = item
+            try:
+                await self._write_response_object(response_object, input)
+            except Exception as e:  # noqa: BLE001
+                logger.error(f"Error writing response object: {e}")
+            finally:
+                self._queue.task_done()
+
+    async def _write_response_object(
+        self, response_object: OpenAIResponseObject, input: list[OpenAIResponseInput]
+    ) -> None:
+        if self.sql_store is None:
+            raise ValueError("Responses store is not initialized")
+
         data = response_object.model_dump()
         data["input"] = [input_item.model_dump() for input_item in input]
 
@@ -73,6 +162,9 @@ class ResponsesStore:
         :param model: The model to filter by.
         :param order: The order to sort the responses by.
         """
+        if not self.sql_store:
+            raise ValueError("Responses store is not initialized")
+
         if not order:
             order = Order.desc
 
@@ -100,6 +192,9 @@ class ResponsesStore:
         """
         Get a response object with automatic access control checking.
         """
+        if not self.sql_store:
+            raise ValueError("Responses store is not initialized")
+
         row = await self.sql_store.fetch_one(
             "openai_responses",
             where={"id": response_id},
@@ -113,6 +208,9 @@ class ResponsesStore:
         return OpenAIResponseObjectWithInput(**row["response_object"])
 
     async def delete_response_object(self, response_id: str) -> OpenAIDeleteResponseObject:
+        if not self.sql_store:
+            raise ValueError("Responses store is not initialized")
+
         row = await self.sql_store.fetch_one("openai_responses", where={"id": response_id})
         if not row:
             raise ValueError(f"Response with id {response_id} not found")
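Reviewer note: with the hunks above, writes are deferred to an asyncio queue on non-SQLite backends and persisted by worker tasks started in initialize(). A usage sketch assembled from this file and the updated unit tests; the defaults for max_write_queue_size and num_writers live in ResponsesStoreConfig and are not shown in this excerpt:

from llama_stack.core.access_control.access_control import default_policy
from llama_stack.core.datatypes import ResponsesStoreConfig
from llama_stack.providers.utils.responses.responses_store import ResponsesStore
from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig

config = ResponsesStoreConfig(sql_store_config=SqliteSqlStoreConfig(db_path="/tmp/responses.db"))
store = ResponsesStore(config, policy=default_policy())
# The backward-compat branch still accepts a bare SqlStoreConfig:
#   store = ResponsesStore(SqliteSqlStoreConfig(db_path="/tmp/responses.db"), policy=default_policy())

async def roundtrip(response, input_items):
    await store.initialize()                                   # creates tables; starts writer tasks unless SQLite
    await store.store_response_object(response, input_items)   # enqueued, or written inline on SQLite
    await store.flush()                                        # drain queued writes before reading back
    return await store.get_response_object(response.id)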
@@ -22,7 +22,6 @@ class WebMethod:
     raw_bytes_request_body: bool | None = False
     # A descriptive name of the corresponding span created by tracing
     descriptive_name: str | None = None
-    experimental: bool | None = False
     required_scope: str | None = None
     deprecated: bool | None = False
 
@@ -39,7 +38,6 @@ def webmethod(
     response_examples: list[Any] | None = None,
     raw_bytes_request_body: bool | None = False,
     descriptive_name: str | None = None,
-    experimental: bool | None = False,
    required_scope: str | None = None,
    deprecated: bool | None = False,
 ) -> Callable[[T], T]:
@@ -50,7 +48,6 @@ def webmethod(
     :param public: True if the operation can be invoked without prior authentication.
     :param request_examples: Sample requests that the operation might take. Pass a list of objects, not JSON.
     :param response_examples: Sample responses that the operation might produce. Pass a list of objects, not JSON.
-    :param experimental: True if the operation is experimental and subject to change.
     :param required_scope: Required scope for this endpoint (e.g., 'monitoring.viewer').
     """
 
@@ -64,7 +61,6 @@ def webmethod(
         response_examples=response_examples,
         raw_bytes_request_body=raw_bytes_request_body,
         descriptive_name=descriptive_name,
-        experimental=experimental,
         required_scope=required_scope,
         deprecated=deprecated,
     )
llama_stack/ui/package-lock.json (generated, 127 changes)

@@ -28,7 +28,7 @@
         "react-markdown": "^10.1.0",
         "remark-gfm": "^4.0.1",
         "remeda": "^2.32.0",
-        "shiki": "^1.29.2",
+        "shiki": "^3.13.0",
         "sonner": "^2.0.7",
         "tailwind-merge": "^3.3.1"
       },
@@ -51,7 +51,7 @@
         "prettier": "3.6.2",
         "tailwindcss": "^4",
         "ts-node": "^10.9.2",
-        "tw-animate-css": "^1.2.9",
+        "tw-animate-css": "^1.4.0",
         "typescript": "^5"
       }
     },
@@ -3250,65 +3250,63 @@
       "license": "MIT"
     },
     "node_modules/@shikijs/core": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-1.29.2.tgz",
-      "integrity": "sha512-vju0lY9r27jJfOY4Z7+Rt/nIOjzJpZ3y+nYpqtUZInVoXQ/TJZcfGnNOGnKjFdVZb8qexiCuSlZRKcGfhhTTZQ==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.13.0.tgz",
+      "integrity": "sha512-3P8rGsg2Eh2qIHekwuQjzWhKI4jV97PhvYjYUzGqjvJfqdQPz+nMlfWahU24GZAyW1FxFI1sYjyhfh5CoLmIUA==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/engine-javascript": "1.29.2",
-        "@shikijs/engine-oniguruma": "1.29.2",
-        "@shikijs/types": "1.29.2",
-        "@shikijs/vscode-textmate": "^10.0.1",
+        "@shikijs/types": "3.13.0",
+        "@shikijs/vscode-textmate": "^10.0.2",
         "@types/hast": "^3.0.4",
-        "hast-util-to-html": "^9.0.4"
+        "hast-util-to-html": "^9.0.5"
       }
     },
     "node_modules/@shikijs/engine-javascript": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-1.29.2.tgz",
-      "integrity": "sha512-iNEZv4IrLYPv64Q6k7EPpOCE/nuvGiKl7zxdq0WFuRPF5PAE9PRo2JGq/d8crLusM59BRemJ4eOqrFrC4wiQ+A==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.13.0.tgz",
+      "integrity": "sha512-Ty7xv32XCp8u0eQt8rItpMs6rU9Ki6LJ1dQOW3V/56PKDcpvfHPnYFbsx5FFUP2Yim34m/UkazidamMNVR4vKg==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/types": "1.29.2",
-        "@shikijs/vscode-textmate": "^10.0.1",
-        "oniguruma-to-es": "^2.2.0"
+        "@shikijs/types": "3.13.0",
+        "@shikijs/vscode-textmate": "^10.0.2",
+        "oniguruma-to-es": "^4.3.3"
       }
     },
     "node_modules/@shikijs/engine-oniguruma": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-1.29.2.tgz",
-      "integrity": "sha512-7iiOx3SG8+g1MnlzZVDYiaeHe7Ez2Kf2HrJzdmGwkRisT7r4rak0e655AcM/tF9JG/kg5fMNYlLLKglbN7gBqA==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.13.0.tgz",
+      "integrity": "sha512-O42rBGr4UDSlhT2ZFMxqM7QzIU+IcpoTMzb3W7AlziI1ZF7R8eS2M0yt5Ry35nnnTX/LTLXFPUjRFCIW+Operg==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/types": "1.29.2",
-        "@shikijs/vscode-textmate": "^10.0.1"
+        "@shikijs/types": "3.13.0",
+        "@shikijs/vscode-textmate": "^10.0.2"
      }
     },
     "node_modules/@shikijs/langs": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-1.29.2.tgz",
-      "integrity": "sha512-FIBA7N3LZ+223U7cJDUYd5shmciFQlYkFXlkKVaHsCPgfVLiO+e12FmQE6Tf9vuyEsFe3dIl8qGWKXgEHL9wmQ==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.13.0.tgz",
+      "integrity": "sha512-672c3WAETDYHwrRP0yLy3W1QYB89Hbpj+pO4KhxK6FzIrDI2FoEXNiNCut6BQmEApYLfuYfpgOZaqbY+E9b8wQ==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/types": "1.29.2"
+        "@shikijs/types": "3.13.0"
       }
     },
     "node_modules/@shikijs/themes": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-1.29.2.tgz",
-      "integrity": "sha512-i9TNZlsq4uoyqSbluIcZkmPL9Bfi3djVxRnofUHwvx/h6SRW3cwgBC5SML7vsDcWyukY0eCzVN980rqP6qNl9g==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.13.0.tgz",
+      "integrity": "sha512-Vxw1Nm1/Od8jyA7QuAenaV78BG2nSr3/gCGdBkLpfLscddCkzkL36Q5b67SrLLfvAJTOUzW39x4FHVCFriPVgg==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/types": "1.29.2"
+        "@shikijs/types": "3.13.0"
       }
     },
     "node_modules/@shikijs/types": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-1.29.2.tgz",
-      "integrity": "sha512-VJjK0eIijTZf0QSTODEXCqinjBn0joAHQ+aPSBzrv4O2d/QSbsMw+ZeSRx03kV34Hy7NzUvV/7NqfYGRLrASmw==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.13.0.tgz",
+      "integrity": "sha512-oM9P+NCFri/mmQ8LoFGVfVyemm5Hi27330zuOBp0annwJdKH1kOLndw3zCtAVDehPLg9fKqoEx3Ht/wNZxolfw==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/vscode-textmate": "^10.0.1",
+        "@shikijs/vscode-textmate": "^10.0.2",
         "@types/hast": "^3.0.4"
       }
     },
@@ -6084,12 +6082,6 @@
       "dev": true,
       "license": "MIT"
     },
-    "node_modules/emoji-regex-xs": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/emoji-regex-xs/-/emoji-regex-xs-1.0.0.tgz",
-      "integrity": "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==",
-      "license": "MIT"
-    },
     "node_modules/encodeurl": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
@@ -11813,15 +11805,21 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
+    "node_modules/oniguruma-parser": {
+      "version": "0.12.1",
+      "resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz",
+      "integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==",
+      "license": "MIT"
+    },
     "node_modules/oniguruma-to-es": {
-      "version": "2.3.0",
-      "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-2.3.0.tgz",
-      "integrity": "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g==",
+      "version": "4.3.3",
+      "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.3.tgz",
+      "integrity": "sha512-rPiZhzC3wXwE59YQMRDodUwwT9FZ9nNBwQQfsd1wfdtlKEyCdRV0avrTcSZ5xlIvGRVPd/cx6ZN45ECmS39xvg==",
       "license": "MIT",
       "dependencies": {
-        "emoji-regex-xs": "^1.0.0",
-        "regex": "^5.1.1",
-        "regex-recursion": "^5.1.1"
+        "oniguruma-parser": "^0.12.1",
+        "regex": "^6.0.1",
+        "regex-recursion": "^6.0.2"
       }
     },
     "node_modules/openid-client": {
@@ -12613,21 +12611,20 @@
       }
     },
     "node_modules/regex": {
-      "version": "5.1.1",
-      "resolved": "https://registry.npmjs.org/regex/-/regex-5.1.1.tgz",
-      "integrity": "sha512-dN5I359AVGPnwzJm2jN1k0W9LPZ+ePvoOeVMMfqIMFz53sSwXkxaJoxr50ptnsC771lK95BnTrVSZxq0b9yCGw==",
+      "version": "6.0.1",
+      "resolved": "https://registry.npmjs.org/regex/-/regex-6.0.1.tgz",
+      "integrity": "sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA==",
       "license": "MIT",
       "dependencies": {
         "regex-utilities": "^2.3.0"
       }
     },
     "node_modules/regex-recursion": {
-      "version": "5.1.1",
-      "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-5.1.1.tgz",
-      "integrity": "sha512-ae7SBCbzVNrIjgSbh7wMznPcQel1DNlDtzensnFxpiNpXt1U2ju/bHugH422r+4LAVS1FpW1YCwilmnNsjum9w==",
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-6.0.2.tgz",
+      "integrity": "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==",
       "license": "MIT",
       "dependencies": {
-        "regex": "^5.1.1",
         "regex-utilities": "^2.3.0"
       }
     },
@@ -13165,18 +13162,18 @@
       }
     },
     "node_modules/shiki": {
-      "version": "1.29.2",
-      "resolved": "https://registry.npmjs.org/shiki/-/shiki-1.29.2.tgz",
-      "integrity": "sha512-njXuliz/cP+67jU2hukkxCNuH1yUi4QfdZZY+sMr5PPrIyXSu5iTb/qYC4BiWWB0vZ+7TbdvYUCeL23zpwCfbg==",
+      "version": "3.13.0",
+      "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.13.0.tgz",
+      "integrity": "sha512-aZW4l8Og16CokuCLf8CF8kq+KK2yOygapU5m3+hoGw0Mdosc6fPitjM+ujYarppj5ZIKGyPDPP1vqmQhr+5/0g==",
       "license": "MIT",
       "dependencies": {
-        "@shikijs/core": "1.29.2",
-        "@shikijs/engine-javascript": "1.29.2",
-        "@shikijs/engine-oniguruma": "1.29.2",
-        "@shikijs/langs": "1.29.2",
-        "@shikijs/themes": "1.29.2",
-        "@shikijs/types": "1.29.2",
-        "@shikijs/vscode-textmate": "^10.0.1",
+        "@shikijs/core": "3.13.0",
+        "@shikijs/engine-javascript": "3.13.0",
+        "@shikijs/engine-oniguruma": "3.13.0",
+        "@shikijs/langs": "3.13.0",
+        "@shikijs/themes": "3.13.0",
+        "@shikijs/types": "3.13.0",
+        "@shikijs/vscode-textmate": "^10.0.2",
         "@types/hast": "^3.0.4"
       }
     },
@@ -13970,9 +13967,9 @@
       "license": "0BSD"
     },
     "node_modules/tw-animate-css": {
-      "version": "1.2.9",
-      "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.2.9.tgz",
-      "integrity": "sha512-9O4k1at9pMQff9EAcCEuy1UNO43JmaPQvq+0lwza9Y0BQ6LB38NiMj+qHqjoQf40355MX+gs6wtlR6H9WsSXFg==",
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.4.0.tgz",
+      "integrity": "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ==",
       "dev": true,
       "license": "MIT",
       "funding": {

(The two hunks below apply to the UI's package.json; its path is not shown in this excerpt.)

@@ -33,7 +33,7 @@
     "react-markdown": "^10.1.0",
     "remark-gfm": "^4.0.1",
     "remeda": "^2.32.0",
-    "shiki": "^1.29.2",
+    "shiki": "^3.13.0",
     "sonner": "^2.0.7",
     "tailwind-merge": "^3.3.1"
   },
@@ -56,7 +56,7 @@
     "prettier": "3.6.2",
     "tailwindcss": "^4",
     "ts-node": "^10.9.2",
-    "tw-animate-css": "^1.2.9",
+    "tw-animate-css": "^1.4.0",
     "typescript": "^5"
   }
 }
@@ -14,6 +14,13 @@ from . import skip_in_github_actions
 # LLAMA_STACK_CONFIG="nvidia" pytest -v tests/integration/providers/nvidia/test_datastore.py
 
 
+@pytest.fixture(autouse=True)
+def skip_if_no_nvidia_provider(llama_stack_client):
+    provider_types = {p.provider_type for p in llama_stack_client.providers.list() if p.api == "datasetio"}
+    if "remote::nvidia" not in provider_types:
+        pytest.skip("datasetio=remote::nvidia provider not configured, skipping")
+
+
 # nvidia provider only
 @skip_in_github_actions
 @pytest.mark.parametrize(
@@ -107,14 +107,34 @@ async def test_get_raw_document_text_deprecated_text_yaml_with_text_content_item
         assert "text/yaml" in str(w[0].message)
 
 
+async def test_get_raw_document_text_supports_json_mime_type():
+    """Test that the function accepts application/json mime type."""
+    json_content = '{"name": "test", "version": "1.0", "items": ["item1", "item2"]}'
+
+    document = Document(content=json_content, mime_type="application/json")
+
+    result = await get_raw_document_text(document)
+    assert result == json_content
+
+
+async def test_get_raw_document_text_with_json_text_content_item():
+    """Test that the function handles JSON TextContentItem correctly."""
+    json_content = '{"key": "value", "nested": {"array": [1, 2, 3]}}'
+
+    document = Document(content=TextContentItem(text=json_content), mime_type="application/json")
+
+    result = await get_raw_document_text(document)
+    assert result == json_content
+
+
 async def test_get_raw_document_text_rejects_unsupported_mime_types():
     """Test that the function rejects unsupported mime types."""
     document = Document(
         content="Some content",
-        mime_type="application/json",  # Not supported
+        mime_type="application/pdf",  # Not supported
     )
 
-    with pytest.raises(ValueError, match="Unexpected document mime type: application/json"):
+    with pytest.raises(ValueError, match="Unexpected document mime type: application/pdf"):
         await get_raw_document_text(document)
@@ -42,10 +42,12 @@ from llama_stack.apis.inference import (
 )
 from llama_stack.apis.tools.tools import Tool, ToolGroups, ToolInvocationResult, ToolParameter, ToolRuntime
 from llama_stack.core.access_control.access_control import default_policy
+from llama_stack.core.datatypes import ResponsesStoreConfig
 from llama_stack.providers.inline.agents.meta_reference.responses.openai_responses import (
     OpenAIResponsesImpl,
 )
 from llama_stack.providers.utils.responses.responses_store import ResponsesStore
+from llama_stack.providers.utils.sqlstore.sqlstore import SqliteSqlStoreConfig
 from tests.unit.providers.agents.meta_reference.fixtures import load_chat_completion_fixture
 
 
@@ -677,7 +679,9 @@ async def test_responses_store_list_input_items_logic():
 
     # Create mock store and response store
     mock_sql_store = AsyncMock()
-    responses_store = ResponsesStore(sql_store_config=None, policy=default_policy())
+    responses_store = ResponsesStore(
+        ResponsesStoreConfig(sql_store_config=SqliteSqlStoreConfig(db_path="mock_db_path")), policy=default_policy()
+    )
     responses_store.sql_store = mock_sql_store
 
     # Setup test data - multiple input items
@@ -5,13 +5,12 @@
 # the root directory of this source tree.
 
 import asyncio
-from unittest.mock import AsyncMock, MagicMock, patch
+from unittest.mock import MagicMock, patch
 
 import numpy as np
 import pytest
 
 from llama_stack.apis.files import Files
-from llama_stack.apis.inference import EmbeddingsResponse, Inference
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.apis.vector_io import Chunk, QueryChunksResponse
 from llama_stack.providers.datatypes import HealthStatus
@@ -70,13 +69,6 @@ def mock_vector_db(vector_db_id, embedding_dimension) -> MagicMock:
     return mock_vector_db
 
 
-@pytest.fixture
-def mock_inference_api(sample_embeddings):
-    mock_api = MagicMock(spec=Inference)
-    mock_api.embeddings = AsyncMock(return_value=EmbeddingsResponse(embeddings=sample_embeddings))
-    return mock_api
-
-
 @pytest.fixture
 def mock_files_api():
     mock_api = MagicMock(spec=Files)
@@ -96,22 +88,6 @@ async def faiss_index(embedding_dimension):
     yield index
 
 
-@pytest.fixture
-async def faiss_adapter(faiss_config, mock_inference_api, mock_files_api) -> FaissVectorIOAdapter:
-    # Create the adapter
-    adapter = FaissVectorIOAdapter(config=faiss_config, inference_api=mock_inference_api, files_api=mock_files_api)
-
-    # Create a mock KVStore
-    mock_kvstore = MagicMock()
-    mock_kvstore.values_in_range = AsyncMock(return_value=[])
-
-    # Patch the initialize method to avoid the kvstore_impl call
-    with patch.object(FaissVectorIOAdapter, "initialize"):
-        # Set the kvstore directly
-        adapter.kvstore = mock_kvstore
-        yield adapter
-
-
 async def test_faiss_query_vector_returns_infinity_when_query_and_embedding_are_identical(
     faiss_index, sample_chunks, sample_embeddings, embedding_dimension
 ):
@@ -67,6 +67,9 @@ async def test_responses_store_pagination_basic():
         input_list = [create_test_response_input(f"Input for {response_id}", f"input-{response_id}")]
         await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Test 1: First page with limit=2, descending order (default)
     result = await store.list_responses(limit=2, order=Order.desc)
     assert len(result.data) == 2
@@ -110,6 +113,9 @@ async def test_responses_store_pagination_ascending():
         input_list = [create_test_response_input(f"Input for {response_id}", f"input-{response_id}")]
         await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Test ascending order pagination
     result = await store.list_responses(limit=1, order=Order.asc)
     assert len(result.data) == 1
@@ -145,6 +151,9 @@ async def test_responses_store_pagination_with_model_filter():
         input_list = [create_test_response_input(f"Input for {response_id}", f"input-{response_id}")]
         await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Test pagination with model filter
     result = await store.list_responses(limit=1, model="model-a", order=Order.desc)
     assert len(result.data) == 1
@@ -192,6 +201,9 @@ async def test_responses_store_pagination_no_limit():
         input_list = [create_test_response_input(f"Input for {response_id}", f"input-{response_id}")]
         await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Test without limit (should use default of 50)
     result = await store.list_responses(order=Order.desc)
     assert len(result.data) == 2
@@ -212,6 +224,9 @@ async def test_responses_store_get_response_object():
     input_list = [create_test_response_input("Test input content", "input-test-resp")]
     await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Retrieve the response
     retrieved = await store.get_response_object("test-resp")
     assert retrieved.id == "test-resp"
@@ -242,6 +257,9 @@ async def test_responses_store_input_items_pagination():
     ]
     await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Verify all items are stored correctly with explicit IDs
     all_items = await store.list_response_input_items("test-resp", order=Order.desc)
     assert len(all_items.data) == 5
@@ -319,6 +337,9 @@ async def test_responses_store_input_items_before_pagination():
     ]
     await store.store_response_object(response, input_list)
 
+    # Wait for all queued writes to complete
+    await store.flush()
+
     # Test before pagination with descending order
     # In desc order: [Fifth, Fourth, Third, Second, First]
     # before="before-3" should return [Fifth, Fourth]
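Reviewer note: every test in this file now calls flush() after writing, so the assertions hold whether or not the backend defers writes to the new queue. The recurring pattern in isolation; the response and input construction helpers live earlier in this test module and are not part of this excerpt:

    await store.store_response_object(response, input_list)

    # Wait for all queued writes to complete
    await store.flush()

    result = await store.list_responses(order=Order.desc)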