refactor: enforce top-level imports for llama-stack-api

Enforce that all imports from llama-stack-api use the form:

from llama_stack_api import <symbol>

 This prevents external code from accessing internal package structure
 (e.g., llama_stack_api.agents, llama_stack_api.common.*) and establishes
 a clear public API boundary.

 Changes:
 - Export 400+ symbols from llama_stack_api/__init__.py
 - Include all API types, common utilities, and strong_typing helpers
 - Update files across src/llama_stack, docs/, tests/, scripts/
 - Convert all submodule imports to top-level imports
 - Ensure docs use the proper top-level import structure

 Addresses PR review feedback requiring explicit __all__ definition to
 prevent "peeking inside" the API package.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-13 14:03:30 -05:00
parent b7480e9c88
commit 2e5d1c8881
270 changed files with 1587 additions and 750 deletions

View file

@ -30,14 +30,14 @@ jobs:
activate-environment: true
version: 0.7.6
- name: Build Llama Stack Spec package
- name: Build Llama Stack API package
working-directory: src/llama-stack-api
run: uv build
- name: Build Llama Stack package
run: uv build
- name: Install Llama Stack package (with spec from local build)
- name: Install Llama Stack package (with api stubs from local build)
run: |
uv pip install --find-links src/llama-stack-api/dist dist/*.whl

View file

@ -58,7 +58,7 @@ External APIs must expose a `available_providers()` function in their module tha
```python
# llama_stack_api_weather/api.py
from llama_stack_api.providers.datatypes import Api, InlineProviderSpec, ProviderSpec
from llama_stack_api import Api, InlineProviderSpec, ProviderSpec
def available_providers() -> list[ProviderSpec]:
@ -79,7 +79,7 @@ A Protocol class like so:
# llama_stack_api_weather/api.py
from typing import Protocol
from llama_stack_api.schema_utils import webmethod
from llama_stack_api import webmethod
class WeatherAPI(Protocol):
@ -151,13 +151,12 @@ __all__ = ["WeatherAPI", "available_providers"]
# llama-stack-api-weather/src/llama_stack_api_weather/weather.py
from typing import Protocol
from llama_stack_api.providers.datatypes import (
from llama_stack_api import (
Api,
ProviderSpec,
RemoteProviderSpec,
webmethod,
)
from llama_stack_api.schema_utils import webmethod
def available_providers() -> list[ProviderSpec]:
return [

View file

@ -153,7 +153,7 @@ description: |
Example using RAGQueryConfig with different search modes:
```python
from llama_stack_api.rag_tool import RAGQueryConfig, RRFRanker, WeightedRanker
from llama_stack_api import RAGQueryConfig, RRFRanker, WeightedRanker
# Vector search
config = RAGQueryConfig(mode="vector", max_chunks=5)
@ -358,7 +358,7 @@ Two ranker types are supported:
Example using RAGQueryConfig with different search modes:
```python
from llama_stack_api.rag_tool import RAGQueryConfig, RRFRanker, WeightedRanker
from llama_stack_api import RAGQueryConfig, RRFRanker, WeightedRanker
# Vector search
config = RAGQueryConfig(mode="vector", max_chunks=5)

View file

@ -16,7 +16,7 @@ import sys
import fire
import ruamel.yaml as yaml
from llama_stack_api.version import LLAMA_STACK_API_V1 # noqa: E402
from llama_stack_api import LLAMA_STACK_API_V1 # noqa: E402
from llama_stack.core.stack import LlamaStack # noqa: E402
from .pyopenapi.options import Options # noqa: E402

View file

@ -16,27 +16,27 @@ from typing import Annotated, Any, Dict, get_args, get_origin, Set, Union
from fastapi import UploadFile
from llama_stack_api.datatypes import Error
from llama_stack_api.strong_typing.core import JsonType
from llama_stack_api.strong_typing.docstring import Docstring, parse_type
from llama_stack_api.strong_typing.inspection import (
from llama_stack_api import (
Docstring,
Error,
JsonSchemaGenerator,
JsonType,
Schema,
SchemaOptions,
get_schema_identifier,
is_generic_list,
is_type_optional,
is_type_union,
is_unwrapped_body_param,
json_dump_string,
object_to_json,
parse_type,
python_type_to_name,
register_schema,
unwrap_generic_list,
unwrap_optional_type,
unwrap_union_types,
)
from llama_stack_api.strong_typing.name import python_type_to_name
from llama_stack_api.strong_typing.schema import (
get_schema_identifier,
JsonSchemaGenerator,
register_schema,
Schema,
SchemaOptions,
)
from llama_stack_api.strong_typing.serialization import json_dump_string, object_to_json
from pydantic import BaseModel
from .operations import (

View file

@ -11,19 +11,21 @@ import typing
from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
from llama_stack_api.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1BETA, LLAMA_STACK_API_V1ALPHA
from termcolor import colored
from llama_stack_api.strong_typing.inspection import get_signature
from typing import get_origin, get_args
from fastapi import UploadFile
from fastapi.params import File, Form
from typing import Annotated
from llama_stack_api.schema_utils import ExtraBodyField
from llama_stack_api import (
ExtraBodyField,
LLAMA_STACK_API_V1,
LLAMA_STACK_API_V1ALPHA,
LLAMA_STACK_API_V1BETA,
get_signature,
)
def split_prefix(

View file

@ -9,7 +9,7 @@ import enum
from dataclasses import dataclass
from typing import Any, ClassVar, Dict, List, Optional, Union
from llama_stack_api.strong_typing.schema import JsonType, Schema, StrictJsonType
from llama_stack_api import JsonType, Schema, StrictJsonType
URL = str

View file

@ -11,8 +11,7 @@ from pathlib import Path
from typing import Any, List, Optional, TextIO, Union, get_type_hints, get_origin, get_args
from pydantic import BaseModel
from llama_stack_api.strong_typing.schema import object_to_json, StrictJsonType
from llama_stack_api.strong_typing.inspection import is_unwrapped_body_param
from llama_stack_api import StrictJsonType, is_unwrapped_body_param, object_to_json
from llama_stack.core.resolver import api_protocol_map
from .generator import Generator

View file

@ -14,7 +14,7 @@ import os
from pathlib import Path
import fire
from llama_stack_api.common.errors import ModelNotFoundError
from llama_stack_api import ModelNotFoundError
from llama_stack.models.llama.llama3.generation import Llama3
from llama_stack.models.llama.llama4.generation import Llama4

View file

@ -12,16 +12,860 @@ for Llama Stack. It is designed to be a lightweight dependency for external prov
and clients that need to interact with Llama Stack APIs without requiring the full
server implementation.
Key components:
- API modules (agents, inference, safety, etc.): Protocol definitions for all Llama Stack APIs
- datatypes: Core data types and provider specifications
- common: Common data types used across APIs
- strong_typing: Type system utilities
- schema_utils: Schema validation and utilities
All imports from this package MUST use the form:
from llama_stack_api import <symbol>
Sub-module imports (e.g., from llama_stack_api.agents import Agents) are NOT supported
and considered a code smell. All exported symbols are explicitly listed in __all__.
"""
__version__ = "0.1.0"
__version__ = "0.4.0"
from . import common, datatypes, schema_utils, strong_typing # noqa: F401
# Import submodules for those who need them
from . import common, strong_typing # noqa: F401
__all__ = ["common", "datatypes", "schema_utils", "strong_typing"]
# Import all public API symbols
from .agents import Agents, ResponseGuardrail, ResponseGuardrailSpec
from .batches import Batches, BatchObject, ListBatchesResponse
from .benchmarks import (
Benchmark,
BenchmarkInput,
Benchmarks,
CommonBenchmarkFields,
ListBenchmarksResponse,
)
# Import commonly used types from common submodule
from .common.content_types import (
URL,
ImageContentItem,
InterleavedContent,
InterleavedContentItem,
TextContentItem,
_URLOrData,
)
from .common.errors import (
ConflictError,
DatasetNotFoundError,
InvalidConversationIdError,
ModelNotFoundError,
ModelTypeError,
ResourceNotFoundError,
TokenValidationError,
ToolGroupNotFoundError,
UnsupportedModelError,
VectorStoreNotFoundError,
)
from .common.job_types import Job, JobStatus
from .common.responses import Order, PaginatedResponse
from .common.training_types import Checkpoint, PostTrainingMetric
from .common.type_system import (
ChatCompletionInputType,
CompletionInputType,
NumberType,
ParamType,
StringType,
)
from .conversations import (
Conversation,
ConversationDeletedResource,
ConversationItem,
ConversationItemCreateRequest,
ConversationItemDeletedResource,
ConversationItemInclude,
ConversationItemList,
ConversationMessage,
Conversations,
Metadata,
)
from .datasetio import DatasetIO, DatasetStore
from .datasets import (
CommonDatasetFields,
Dataset,
DatasetInput,
DatasetPurpose,
Datasets,
DatasetType,
DataSource,
ListDatasetsResponse,
RowsDataSource,
URIDataSource,
)
from .datatypes import (
Api,
BenchmarksProtocolPrivate,
DatasetsProtocolPrivate,
DynamicApiMeta,
Error,
ExternalApiSpec,
HealthResponse,
HealthStatus,
InlineProviderSpec,
ModelsProtocolPrivate,
ProviderSpec,
RemoteProviderConfig,
RemoteProviderSpec,
RoutingTable,
ScoringFunctionsProtocolPrivate,
ShieldsProtocolPrivate,
ToolGroupsProtocolPrivate,
VectorStoresProtocolPrivate,
)
from .eval import BenchmarkConfig, Eval, EvalCandidate, EvaluateResponse, ModelCandidate
from .files import (
ExpiresAfter,
Files,
ListOpenAIFileResponse,
OpenAIFileDeleteResponse,
OpenAIFileObject,
OpenAIFilePurpose,
)
from .inference import (
Bf16QuantizationConfig,
ChatCompletionResponseEventType,
CompletionRequest,
EmbeddingsResponse,
EmbeddingTaskType,
Fp8QuantizationConfig,
GrammarResponseFormat,
GreedySamplingStrategy,
Inference,
InferenceProvider,
Int4QuantizationConfig,
JsonSchemaResponseFormat,
ListOpenAIChatCompletionResponse,
LogProbConfig,
ModelStore,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionChunk,
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionMessageContent,
OpenAIChatCompletionRequestWithExtraBody,
OpenAIChatCompletionTextOnlyMessageContent,
OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction,
OpenAIChatCompletionUsage,
OpenAIChatCompletionUsageCompletionTokensDetails,
OpenAIChatCompletionUsagePromptTokensDetails,
OpenAIChoice,
OpenAIChoiceDelta,
OpenAIChoiceLogprobs,
OpenAIChunkChoice,
OpenAICompletion,
OpenAICompletionChoice,
OpenAICompletionLogprobs,
OpenAICompletionRequestWithExtraBody,
OpenAICompletionWithInputMessages,
OpenAIDeveloperMessageParam,
OpenAIEmbeddingData,
OpenAIEmbeddingsRequestWithExtraBody,
OpenAIEmbeddingsResponse,
OpenAIEmbeddingUsage,
OpenAIFile,
OpenAIFileFile,
OpenAIImageURL,
OpenAIJSONSchema,
OpenAIMessageParam,
OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema,
OpenAIResponseFormatParam,
OpenAIResponseFormatText,
OpenAISystemMessageParam,
OpenAITokenLogProb,
OpenAIToolMessageParam,
OpenAITopLogProb,
OpenAIUserMessageParam,
QuantizationConfig,
QuantizationType,
RerankData,
RerankResponse,
ResponseFormat,
ResponseFormatType,
SamplingParams,
SamplingStrategy,
SystemMessage,
SystemMessageBehavior,
TextTruncation,
TokenLogProbs,
ToolChoice,
ToolResponseMessage,
TopKSamplingStrategy,
TopPSamplingStrategy,
UserMessage,
)
from .inspect import (
ApiFilter,
HealthInfo,
Inspect,
ListRoutesResponse,
RouteInfo,
VersionInfo,
)
from .models import (
CommonModelFields,
ListModelsResponse,
Model,
ModelInput,
Models,
ModelType,
OpenAIListModelsResponse,
OpenAIModel,
)
from .openai_responses import (
AllowedToolsFilter,
ApprovalFilter,
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
MCPListToolsTool,
OpenAIDeleteResponseObject,
OpenAIResponseAnnotationCitation,
OpenAIResponseAnnotationContainerFileCitation,
OpenAIResponseAnnotationFileCitation,
OpenAIResponseAnnotationFilePath,
OpenAIResponseAnnotations,
OpenAIResponseContentPart,
OpenAIResponseContentPartOutputText,
OpenAIResponseContentPartReasoningSummary,
OpenAIResponseContentPartReasoningText,
OpenAIResponseContentPartRefusal,
OpenAIResponseError,
OpenAIResponseInput,
OpenAIResponseInputFunctionToolCallOutput,
OpenAIResponseInputMessageContent,
OpenAIResponseInputMessageContentFile,
OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseInputToolFileSearch,
OpenAIResponseInputToolFunction,
OpenAIResponseInputToolMCP,
OpenAIResponseInputToolWebSearch,
OpenAIResponseMCPApprovalRequest,
OpenAIResponseMCPApprovalResponse,
OpenAIResponseMessage,
OpenAIResponseObject,
OpenAIResponseObjectStream,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseContentPartAdded,
OpenAIResponseObjectStreamResponseContentPartDone,
OpenAIResponseObjectStreamResponseCreated,
OpenAIResponseObjectStreamResponseFailed,
OpenAIResponseObjectStreamResponseFileSearchCallCompleted,
OpenAIResponseObjectStreamResponseFileSearchCallInProgress,
OpenAIResponseObjectStreamResponseFileSearchCallSearching,
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta,
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone,
OpenAIResponseObjectStreamResponseIncomplete,
OpenAIResponseObjectStreamResponseInProgress,
OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta,
OpenAIResponseObjectStreamResponseMcpCallArgumentsDone,
OpenAIResponseObjectStreamResponseMcpCallCompleted,
OpenAIResponseObjectStreamResponseMcpCallFailed,
OpenAIResponseObjectStreamResponseMcpCallInProgress,
OpenAIResponseObjectStreamResponseMcpListToolsCompleted,
OpenAIResponseObjectStreamResponseMcpListToolsFailed,
OpenAIResponseObjectStreamResponseMcpListToolsInProgress,
OpenAIResponseObjectStreamResponseOutputItemAdded,
OpenAIResponseObjectStreamResponseOutputItemDone,
OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded,
OpenAIResponseObjectStreamResponseOutputTextDelta,
OpenAIResponseObjectStreamResponseOutputTextDone,
OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded,
OpenAIResponseObjectStreamResponseReasoningSummaryPartDone,
OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta,
OpenAIResponseObjectStreamResponseReasoningSummaryTextDone,
OpenAIResponseObjectStreamResponseReasoningTextDelta,
OpenAIResponseObjectStreamResponseReasoningTextDone,
OpenAIResponseObjectStreamResponseRefusalDelta,
OpenAIResponseObjectStreamResponseRefusalDone,
OpenAIResponseObjectStreamResponseWebSearchCallCompleted,
OpenAIResponseObjectStreamResponseWebSearchCallInProgress,
OpenAIResponseObjectStreamResponseWebSearchCallSearching,
OpenAIResponseObjectWithInput,
OpenAIResponseOutput,
OpenAIResponseOutputMessageContent,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFileSearchToolCallResults,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponsePrompt,
OpenAIResponseText,
OpenAIResponseTextFormat,
OpenAIResponseTool,
OpenAIResponseToolMCP,
OpenAIResponseUsage,
OpenAIResponseUsageInputTokensDetails,
OpenAIResponseUsageOutputTokensDetails,
WebSearchToolTypes,
)
from .post_training import (
AlgorithmConfig,
DataConfig,
DatasetFormat,
DPOAlignmentConfig,
DPOLossType,
EfficiencyConfig,
ListPostTrainingJobsResponse,
LoraFinetuningConfig,
OptimizerConfig,
OptimizerType,
PostTraining,
PostTrainingJob,
PostTrainingJobArtifactsResponse,
PostTrainingJobLogStream,
PostTrainingJobStatusResponse,
PostTrainingRLHFRequest,
QATFinetuningConfig,
RLHFAlgorithm,
TrainingConfig,
)
from .prompts import ListPromptsResponse, Prompt, Prompts
from .providers import ListProvidersResponse, ProviderInfo, Providers
from .rag_tool import (
DefaultRAGQueryGeneratorConfig,
LLMRAGQueryGeneratorConfig,
RAGDocument,
RAGQueryConfig,
RAGQueryGenerator,
RAGQueryGeneratorConfig,
RAGQueryResult,
RAGSearchMode,
Ranker,
RRFRanker,
WeightedRanker,
)
from .resource import Resource, ResourceType
from .safety import (
ModerationObject,
ModerationObjectResults,
RunShieldResponse,
Safety,
SafetyViolation,
ShieldStore,
ViolationLevel,
)
from .schema_utils import (
CallableT,
ExtraBodyField,
WebMethod,
json_schema_type,
register_schema,
webmethod,
)
from .scoring import (
ScoreBatchResponse,
ScoreResponse,
Scoring,
ScoringFunctionStore,
ScoringResult,
ScoringResultRow,
)
from .scoring_functions import (
AggregationFunctionType,
BasicScoringFnParams,
CommonScoringFnFields,
ListScoringFunctionsResponse,
LLMAsJudgeScoringFnParams,
RegexParserScoringFnParams,
ScoringFn,
ScoringFnInput,
ScoringFnParams,
ScoringFnParamsType,
ScoringFunctions,
)
from .shields import (
CommonShieldFields,
ListShieldsResponse,
Shield,
ShieldInput,
Shields,
)
# Import from strong_typing
from .strong_typing.core import JsonType
from .strong_typing.docstring import Docstring, parse_type
from .strong_typing.inspection import (
get_signature,
is_generic_list,
is_type_optional,
is_type_union,
is_unwrapped_body_param,
unwrap_generic_list,
unwrap_optional_type,
unwrap_union_types,
)
from .strong_typing.name import python_type_to_name
from .strong_typing.schema import (
JsonSchemaGenerator,
Schema,
SchemaOptions,
StrictJsonType,
get_schema_identifier,
)
from .strong_typing.serialization import json_dump_string, object_to_json
from .tools import (
ListToolDefsResponse,
ListToolGroupsResponse,
SpecialToolGroup,
ToolDef,
ToolGroup,
ToolGroupInput,
ToolGroups,
ToolInvocationResult,
ToolRuntime,
ToolStore,
)
from .vector_io import (
Chunk,
ChunkMetadata,
OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
OpenAICreateVectorStoreRequestWithExtraBody,
QueryChunksResponse,
SearchRankingOptions,
VectorIO,
VectorStoreChunkingStrategy,
VectorStoreChunkingStrategyAuto,
VectorStoreChunkingStrategyStatic,
VectorStoreChunkingStrategyStaticConfig,
VectorStoreContent,
VectorStoreCreateRequest,
VectorStoreDeleteResponse,
VectorStoreFileBatchObject,
VectorStoreFileContentResponse,
VectorStoreFileCounts,
VectorStoreFileDeleteResponse,
VectorStoreFileLastError,
VectorStoreFileObject,
VectorStoreFilesListInBatchResponse,
VectorStoreFileStatus,
VectorStoreListFilesResponse,
VectorStoreListResponse,
VectorStoreModifyRequest,
VectorStoreObject,
VectorStoreSearchRequest,
VectorStoreSearchResponse,
VectorStoreSearchResponsePage,
VectorStoreTable,
)
from .vector_stores import VectorStore, VectorStoreInput
from .version import (
LLAMA_STACK_API_V1,
LLAMA_STACK_API_V1ALPHA,
LLAMA_STACK_API_V1BETA,
)
__all__ = [
# Submodules
"common",
"strong_typing",
# Version constants
"LLAMA_STACK_API_V1",
"LLAMA_STACK_API_V1ALPHA",
"LLAMA_STACK_API_V1BETA",
# API Symbols
"Agents",
"AggregationFunctionType",
"AlgorithmConfig",
"AllowedToolsFilter",
"Api",
"ApiFilter",
"ApprovalFilter",
"BasicScoringFnParams",
"Batches",
"BatchObject",
"Benchmark",
"BenchmarkConfig",
"BenchmarkInput",
"Benchmarks",
"BenchmarksProtocolPrivate",
"Bf16QuantizationConfig",
"CallableT",
"ChatCompletionInputType",
"ChatCompletionResponseEventType",
"Checkpoint",
"Chunk",
"ChunkMetadata",
"CommonBenchmarkFields",
"ConflictError",
"CommonDatasetFields",
"CommonModelFields",
"CommonScoringFnFields",
"CommonShieldFields",
"CompletionInputType",
"CompletionRequest",
"Conversation",
"ConversationDeletedResource",
"ConversationItem",
"ConversationItemCreateRequest",
"ConversationItemDeletedResource",
"ConversationItemInclude",
"ConversationItemList",
"ConversationMessage",
"Conversations",
"DPOAlignmentConfig",
"DPOLossType",
"DataConfig",
"DataSource",
"Dataset",
"DatasetFormat",
"DatasetIO",
"DatasetInput",
"DatasetPurpose",
"DatasetNotFoundError",
"DatasetStore",
"DatasetType",
"Datasets",
"DatasetsProtocolPrivate",
"DefaultRAGQueryGeneratorConfig",
"Docstring",
"DynamicApiMeta",
"EfficiencyConfig",
"EmbeddingTaskType",
"EmbeddingsResponse",
"Error",
"Eval",
"EvalCandidate",
"EvaluateResponse",
"ExpiresAfter",
"ExternalApiSpec",
"ExtraBodyField",
"Files",
"Fp8QuantizationConfig",
"get_schema_identifier",
"get_signature",
"GrammarResponseFormat",
"GreedySamplingStrategy",
"HealthInfo",
"HealthResponse",
"HealthStatus",
"ImageContentItem",
"Inference",
"InferenceProvider",
"InlineProviderSpec",
"Inspect",
"Int4QuantizationConfig",
"InterleavedContent",
"InterleavedContentItem",
"InvalidConversationIdError",
"is_generic_list",
"is_type_optional",
"is_type_union",
"is_unwrapped_body_param",
"Job",
"JobStatus",
"json_dump_string",
"json_schema_type",
"JsonSchemaGenerator",
"JsonSchemaResponseFormat",
"JsonType",
"LLMAsJudgeScoringFnParams",
"LLMRAGQueryGeneratorConfig",
"ListBatchesResponse",
"ListBenchmarksResponse",
"ListDatasetsResponse",
"ListModelsResponse",
"ListOpenAIChatCompletionResponse",
"ListOpenAIFileResponse",
"ListOpenAIResponseInputItem",
"ListOpenAIResponseObject",
"ListPostTrainingJobsResponse",
"ListPromptsResponse",
"ListProvidersResponse",
"ListRoutesResponse",
"ListScoringFunctionsResponse",
"ListShieldsResponse",
"ListToolDefsResponse",
"ListToolGroupsResponse",
"LogProbConfig",
"LoraFinetuningConfig",
"MCPListToolsTool",
"Metadata",
"Model",
"ModelCandidate",
"ModelInput",
"ModelNotFoundError",
"ModelStore",
"ModelType",
"ModelTypeError",
"Models",
"ModelsProtocolPrivate",
"ModerationObject",
"ModerationObjectResults",
"NumberType",
"object_to_json",
"OpenAIAssistantMessageParam",
"OpenAIChatCompletion",
"OpenAIChatCompletionChunk",
"OpenAIChatCompletionContentPartImageParam",
"OpenAIChatCompletionContentPartParam",
"OpenAIChatCompletionContentPartTextParam",
"OpenAIChatCompletionMessageContent",
"OpenAIChatCompletionRequestWithExtraBody",
"OpenAIChatCompletionTextOnlyMessageContent",
"OpenAIChatCompletionToolCall",
"OpenAIChatCompletionToolCallFunction",
"OpenAIChatCompletionUsage",
"OpenAIChatCompletionUsageCompletionTokensDetails",
"OpenAIChatCompletionUsagePromptTokensDetails",
"OpenAIChoice",
"OpenAIChoiceDelta",
"OpenAIChoiceLogprobs",
"OpenAIChunkChoice",
"OpenAICompletion",
"OpenAICompletionChoice",
"OpenAICompletionLogprobs",
"OpenAICompletionRequestWithExtraBody",
"OpenAICompletionWithInputMessages",
"OpenAICreateVectorStoreFileBatchRequestWithExtraBody",
"OpenAICreateVectorStoreRequestWithExtraBody",
"OpenAIDeleteResponseObject",
"OpenAIDeveloperMessageParam",
"OpenAIEmbeddingData",
"OpenAIEmbeddingUsage",
"OpenAIEmbeddingsRequestWithExtraBody",
"OpenAIEmbeddingsResponse",
"OpenAIFile",
"OpenAIFileDeleteResponse",
"OpenAIFileFile",
"OpenAIFileObject",
"OpenAIFilePurpose",
"OpenAIImageURL",
"OpenAIJSONSchema",
"OpenAIListModelsResponse",
"OpenAIMessageParam",
"OpenAIModel",
"Order",
"OpenAIResponseAnnotationCitation",
"OpenAIResponseAnnotationContainerFileCitation",
"OpenAIResponseAnnotationFileCitation",
"OpenAIResponseAnnotationFilePath",
"OpenAIResponseAnnotations",
"OpenAIResponseContentPart",
"OpenAIResponseContentPartOutputText",
"OpenAIResponseContentPartReasoningSummary",
"OpenAIResponseContentPartReasoningText",
"OpenAIResponseContentPartRefusal",
"OpenAIResponseError",
"OpenAIResponseFormatJSONObject",
"OpenAIResponseFormatJSONSchema",
"OpenAIResponseFormatParam",
"OpenAIResponseFormatText",
"OpenAIResponseInput",
"OpenAIResponseInputFunctionToolCallOutput",
"OpenAIResponseInputMessageContent",
"OpenAIResponseInputMessageContentFile",
"OpenAIResponseInputMessageContentImage",
"OpenAIResponseInputMessageContentText",
"OpenAIResponseInputTool",
"OpenAIResponseInputToolFileSearch",
"OpenAIResponseInputToolFunction",
"OpenAIResponseInputToolMCP",
"OpenAIResponseInputToolWebSearch",
"OpenAIResponseMCPApprovalRequest",
"OpenAIResponseMCPApprovalResponse",
"OpenAIResponseMessage",
"OpenAIResponseObject",
"OpenAIResponseObjectStream",
"OpenAIResponseObjectStreamResponseCompleted",
"OpenAIResponseObjectStreamResponseContentPartAdded",
"OpenAIResponseObjectStreamResponseContentPartDone",
"OpenAIResponseObjectStreamResponseCreated",
"OpenAIResponseObjectStreamResponseFailed",
"OpenAIResponseObjectStreamResponseFileSearchCallCompleted",
"OpenAIResponseObjectStreamResponseFileSearchCallInProgress",
"OpenAIResponseObjectStreamResponseFileSearchCallSearching",
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta",
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone",
"OpenAIResponseObjectStreamResponseInProgress",
"OpenAIResponseObjectStreamResponseIncomplete",
"OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta",
"OpenAIResponseObjectStreamResponseMcpCallArgumentsDone",
"OpenAIResponseObjectStreamResponseMcpCallCompleted",
"OpenAIResponseObjectStreamResponseMcpCallFailed",
"OpenAIResponseObjectStreamResponseMcpCallInProgress",
"OpenAIResponseObjectStreamResponseMcpListToolsCompleted",
"OpenAIResponseObjectStreamResponseMcpListToolsFailed",
"OpenAIResponseObjectStreamResponseMcpListToolsInProgress",
"OpenAIResponseObjectStreamResponseOutputItemAdded",
"OpenAIResponseObjectStreamResponseOutputItemDone",
"OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded",
"OpenAIResponseObjectStreamResponseOutputTextDelta",
"OpenAIResponseObjectStreamResponseOutputTextDone",
"OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded",
"OpenAIResponseObjectStreamResponseReasoningSummaryPartDone",
"OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta",
"OpenAIResponseObjectStreamResponseReasoningSummaryTextDone",
"OpenAIResponseObjectStreamResponseReasoningTextDelta",
"OpenAIResponseObjectStreamResponseReasoningTextDone",
"OpenAIResponseObjectStreamResponseRefusalDelta",
"OpenAIResponseObjectStreamResponseRefusalDone",
"OpenAIResponseObjectStreamResponseWebSearchCallCompleted",
"OpenAIResponseObjectStreamResponseWebSearchCallInProgress",
"OpenAIResponseObjectStreamResponseWebSearchCallSearching",
"OpenAIResponseObjectWithInput",
"OpenAIResponseOutput",
"OpenAIResponseOutputMessageContent",
"OpenAIResponseOutputMessageContentOutputText",
"OpenAIResponseOutputMessageFileSearchToolCall",
"OpenAIResponseOutputMessageFileSearchToolCallResults",
"OpenAIResponseOutputMessageFunctionToolCall",
"OpenAIResponseOutputMessageMCPCall",
"OpenAIResponseOutputMessageMCPListTools",
"OpenAIResponseOutputMessageWebSearchToolCall",
"OpenAIResponsePrompt",
"OpenAIResponseText",
"OpenAIResponseTextFormat",
"OpenAIResponseTool",
"OpenAIResponseToolMCP",
"OpenAIResponseUsage",
"OpenAIResponseUsageInputTokensDetails",
"OpenAIResponseUsageOutputTokensDetails",
"OpenAISystemMessageParam",
"OpenAITokenLogProb",
"OpenAIToolMessageParam",
"OpenAITopLogProb",
"OpenAIUserMessageParam",
"OptimizerConfig",
"OptimizerType",
"PaginatedResponse",
"ParamType",
"parse_type",
"PostTraining",
"PostTrainingMetric",
"PostTrainingJob",
"PostTrainingJobArtifactsResponse",
"PostTrainingJobLogStream",
"PostTrainingJobStatusResponse",
"PostTrainingRLHFRequest",
"Prompt",
"Prompts",
"ProviderInfo",
"ProviderSpec",
"Providers",
"python_type_to_name",
"QATFinetuningConfig",
"QuantizationConfig",
"QuantizationType",
"QueryChunksResponse",
"RAGDocument",
"RAGQueryConfig",
"RAGQueryGenerator",
"RAGQueryGeneratorConfig",
"RAGQueryResult",
"RAGSearchMode",
"register_schema",
"RLHFAlgorithm",
"RRFRanker",
"Ranker",
"RegexParserScoringFnParams",
"RemoteProviderConfig",
"RemoteProviderSpec",
"RerankData",
"RerankResponse",
"Resource",
"ResourceNotFoundError",
"ResourceType",
"ResponseFormat",
"ResponseFormatType",
"ResponseGuardrail",
"ResponseGuardrailSpec",
"RouteInfo",
"RoutingTable",
"RowsDataSource",
"RunShieldResponse",
"Safety",
"SafetyViolation",
"SamplingParams",
"SamplingStrategy",
"ScoreBatchResponse",
"ScoreResponse",
"Scoring",
"ScoringFn",
"ScoringFnInput",
"ScoringFnParams",
"ScoringFnParamsType",
"ScoringFunctionStore",
"ScoringFunctions",
"ScoringFunctionsProtocolPrivate",
"ScoringResult",
"ScoringResultRow",
"Schema",
"SchemaOptions",
"SearchRankingOptions",
"Shield",
"ShieldInput",
"ShieldStore",
"Shields",
"ShieldsProtocolPrivate",
"SpecialToolGroup",
"StrictJsonType",
"StringType",
"SystemMessage",
"SystemMessageBehavior",
"TextContentItem",
"TextTruncation",
"TokenLogProbs",
"TokenValidationError",
"ToolChoice",
"ToolGroupNotFoundError",
"ToolDef",
"ToolGroup",
"ToolGroupInput",
"ToolGroups",
"ToolGroupsProtocolPrivate",
"ToolInvocationResult",
"ToolResponseMessage",
"ToolRuntime",
"ToolStore",
"TopKSamplingStrategy",
"TopPSamplingStrategy",
"TrainingConfig",
"UnsupportedModelError",
"unwrap_generic_list",
"unwrap_optional_type",
"unwrap_union_types",
"URIDataSource",
"URL",
"_URLOrData",
"UserMessage",
"VectorIO",
"VectorStore",
"VectorStoreChunkingStrategy",
"VectorStoreChunkingStrategyAuto",
"VectorStoreChunkingStrategyStatic",
"VectorStoreChunkingStrategyStaticConfig",
"VectorStoreContent",
"VectorStoreCreateRequest",
"VectorStoreDeleteResponse",
"VectorStoreFileBatchObject",
"VectorStoreFileContentResponse",
"VectorStoreFileCounts",
"VectorStoreFileDeleteResponse",
"VectorStoreFileLastError",
"VectorStoreFileObject",
"VectorStoreFileStatus",
"VectorStoreFilesListInBatchResponse",
"VectorStoreInput",
"VectorStoreListFilesResponse",
"VectorStoreListResponse",
"VectorStoreModifyRequest",
"VectorStoreObject",
"VectorStoreSearchRequest",
"VectorStoreSearchResponse",
"VectorStoreSearchResponsePage",
"VectorStoreTable",
"VectorStoreNotFoundError",
"VectorStoresProtocolPrivate",
"VersionInfo",
"ViolationLevel",
"webmethod",
"WebMethod",
"WebSearchToolTypes",
"WeightedRanker",
]

View file

@ -9,7 +9,7 @@ import sys
from pathlib import Path
import yaml
from llama_stack_api.datatypes import Api
from llama_stack_api import Api
from termcolor import cprint
from llama_stack.cli.stack.utils import ImageType

View file

@ -11,7 +11,7 @@ from functools import lru_cache
from pathlib import Path
import yaml
from llama_stack_api.datatypes import Api
from llama_stack_api import Api
from termcolor import cprint
from llama_stack.core.datatypes import (

View file

@ -6,7 +6,7 @@
import sys
from llama_stack_api.datatypes import Api
from llama_stack_api import Api
from pydantic import BaseModel
from termcolor import cprint

View file

@ -12,7 +12,7 @@ from enum import Enum
from typing import Any, Union, get_args, get_origin
import httpx
from llama_stack_api.datatypes import RemoteProviderConfig
from llama_stack_api import RemoteProviderConfig
from pydantic import BaseModel, parse_obj_as
from termcolor import cprint

View file

@ -6,7 +6,7 @@
import textwrap
from typing import Any
from llama_stack_api.datatypes import Api, ProviderSpec
from llama_stack_api import Api, ProviderSpec
from llama_stack.core.datatypes import (
LLAMA_STACK_RUN_CONFIG_VERSION,

View file

@ -8,7 +8,7 @@ import secrets
import time
from typing import Any, Literal
from llama_stack_api.conversations import (
from llama_stack_api import (
Conversation,
ConversationDeletedResource,
ConversationItem,

View file

@ -9,21 +9,32 @@ from pathlib import Path
from typing import Annotated, Any, Literal, Self
from urllib.parse import urlparse
from llama_stack_api.benchmarks import Benchmark, BenchmarkInput
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Dataset, DatasetInput
from llama_stack_api.datatypes import Api, ProviderSpec
from llama_stack_api.eval import Eval
from llama_stack_api.inference import Inference
from llama_stack_api.models import Model, ModelInput
from llama_stack_api.resource import Resource
from llama_stack_api.safety import Safety
from llama_stack_api.scoring import Scoring
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnInput
from llama_stack_api.shields import Shield, ShieldInput
from llama_stack_api.tools import ToolGroup, ToolGroupInput, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack_api.vector_stores import VectorStore, VectorStoreInput
from llama_stack_api import (
Api,
Benchmark,
BenchmarkInput,
Dataset,
DatasetInput,
DatasetIO,
Eval,
Inference,
Model,
ModelInput,
ProviderSpec,
Resource,
Safety,
Scoring,
ScoringFn,
ScoringFnInput,
Shield,
ShieldInput,
ToolGroup,
ToolGroupInput,
ToolRuntime,
VectorIO,
VectorStore,
VectorStoreInput,
)
from pydantic import BaseModel, Field, field_validator, model_validator
from llama_stack.core.access_control.datatypes import AccessRule

View file

@ -10,7 +10,7 @@ import os
from typing import Any
import yaml
from llama_stack_api.datatypes import (
from llama_stack_api import (
Api,
InlineProviderSpec,
ProviderSpec,

View file

@ -6,7 +6,7 @@
import yaml
from llama_stack_api.datatypes import Api, ExternalApiSpec
from llama_stack_api import Api, ExternalApiSpec
from llama_stack.core.datatypes import BuildConfig, StackRunConfig
from llama_stack.log import get_logger

View file

@ -6,9 +6,9 @@
from importlib.metadata import version
from llama_stack_api.datatypes import HealthStatus
from llama_stack_api.inspect import (
from llama_stack_api import (
HealthInfo,
HealthStatus,
Inspect,
ListRoutesResponse,
RouteInfo,

View file

@ -18,7 +18,7 @@ from typing import Any, TypeVar, Union, get_args, get_origin
import httpx
import yaml
from fastapi import Response as FastAPIResponse
from llama_stack_api.strong_typing.inspection import is_unwrapped_body_param
from llama_stack_api import is_unwrapped_body_param
try:
from llama_stack_client import (

View file

@ -7,7 +7,7 @@
import json
from typing import Any
from llama_stack_api.prompts import ListPromptsResponse, Prompt, Prompts
from llama_stack_api import ListPromptsResponse, Prompt, Prompts
from pydantic import BaseModel
from llama_stack.core.datatypes import StackRunConfig

View file

@ -7,8 +7,7 @@
import asyncio
from typing import Any
from llama_stack_api.datatypes import HealthResponse, HealthStatus
from llama_stack_api.providers import ListProvidersResponse, ProviderInfo, Providers
from llama_stack_api import HealthResponse, HealthStatus, ListProvidersResponse, ProviderInfo, Providers
from pydantic import BaseModel
from llama_stack.log import get_logger

View file

@ -8,41 +8,45 @@ import importlib.metadata
import inspect
from typing import Any
from llama_stack_api.agents import Agents
from llama_stack_api.batches import Batches
from llama_stack_api.benchmarks import Benchmarks
from llama_stack_api.conversations import Conversations
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import (
from llama_stack_api import (
LLAMA_STACK_API_V1ALPHA,
Agents,
Api,
Batches,
Benchmarks,
BenchmarksProtocolPrivate,
Conversations,
DatasetIO,
Datasets,
DatasetsProtocolPrivate,
Eval,
ExternalApiSpec,
Files,
Inference,
InferenceProvider,
Inspect,
Models,
ModelsProtocolPrivate,
PostTraining,
Prompts,
ProviderSpec,
RemoteProviderConfig,
RemoteProviderSpec,
Safety,
Scoring,
ScoringFunctions,
ScoringFunctionsProtocolPrivate,
Shields,
ShieldsProtocolPrivate,
ToolGroups,
ToolGroupsProtocolPrivate,
ToolRuntime,
VectorIO,
VectorStore,
)
from llama_stack_api import (
Providers as ProvidersAPI,
)
from llama_stack_api.eval import Eval
from llama_stack_api.files import Files
from llama_stack_api.inference import Inference, InferenceProvider
from llama_stack_api.inspect import Inspect
from llama_stack_api.models import Models
from llama_stack_api.post_training import PostTraining
from llama_stack_api.prompts import Prompts
from llama_stack_api.providers import Providers as ProvidersAPI
from llama_stack_api.safety import Safety
from llama_stack_api.scoring import Scoring
from llama_stack_api.scoring_functions import ScoringFunctions
from llama_stack_api.shields import Shields
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack_api.vector_stores import VectorStore
from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA
from llama_stack.core.client import get_client_impl
from llama_stack.core.datatypes import (

View file

@ -6,7 +6,7 @@
from typing import Any
from llama_stack_api.datatypes import Api, RoutingTable
from llama_stack_api import Api, RoutingTable
from llama_stack.core.datatypes import (
AccessRule,

View file

@ -6,10 +6,7 @@
from typing import Any
from llama_stack_api.common.responses import PaginatedResponse
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import DatasetPurpose, DataSource
from llama_stack_api.datatypes import RoutingTable
from llama_stack_api import DatasetIO, DatasetPurpose, DataSource, PaginatedResponse, RoutingTable
from llama_stack.log import get_logger

View file

@ -6,9 +6,12 @@
from typing import Any
from llama_stack_api.datatypes import RoutingTable
from llama_stack_api.eval import BenchmarkConfig, Eval, EvaluateResponse, Job
from llama_stack_api.scoring import (
from llama_stack_api import (
BenchmarkConfig,
Eval,
EvaluateResponse,
Job,
RoutingTable,
ScoreBatchResponse,
ScoreResponse,
Scoring,

View file

@ -11,11 +11,14 @@ from datetime import UTC, datetime
from typing import Annotated, Any
from fastapi import Body
from llama_stack_api.common.errors import ModelNotFoundError, ModelTypeError
from llama_stack_api.datatypes import HealthResponse, HealthStatus, RoutingTable
from llama_stack_api.inference import (
from llama_stack_api import (
HealthResponse,
HealthStatus,
Inference,
ListOpenAIChatCompletionResponse,
ModelNotFoundError,
ModelType,
ModelTypeError,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionChunk,
@ -34,8 +37,8 @@ from llama_stack_api.inference import (
OpenAIMessageParam,
Order,
RerankResponse,
RoutingTable,
)
from llama_stack_api.models import ModelType
from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam
from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam
from pydantic import TypeAdapter

View file

@ -6,10 +6,7 @@
from typing import Any
from llama_stack_api.datatypes import RoutingTable
from llama_stack_api.inference import OpenAIMessageParam
from llama_stack_api.safety import ModerationObject, RunShieldResponse, Safety
from llama_stack_api.shields import Shield
from llama_stack_api import ModerationObject, OpenAIMessageParam, RoutingTable, RunShieldResponse, Safety, Shield
from llama_stack.core.datatypes import SafetyConfig
from llama_stack.log import get_logger

View file

@ -6,10 +6,8 @@
from typing import Any
from llama_stack_api.common.content_types import (
from llama_stack_api import (
URL,
)
from llama_stack_api.tools import (
ListToolDefsResponse,
ToolRuntime,
)

View file

@ -9,14 +9,16 @@ import uuid
from typing import Annotated, Any
from fastapi import Body
from llama_stack_api.common.content_types import InterleavedContent
from llama_stack_api.datatypes import HealthResponse, HealthStatus, RoutingTable
from llama_stack_api.models import ModelType
from llama_stack_api.vector_io import (
from llama_stack_api import (
Chunk,
HealthResponse,
HealthStatus,
InterleavedContent,
ModelType,
OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
OpenAICreateVectorStoreRequestWithExtraBody,
QueryChunksResponse,
RoutingTable,
SearchRankingOptions,
VectorIO,
VectorStoreChunkingStrategy,

View file

@ -6,7 +6,7 @@
from typing import Any
from llama_stack_api.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse
from llama_stack_api import Benchmark, Benchmarks, ListBenchmarksResponse
from llama_stack.core.datatypes import (
BenchmarkWithOwner,

View file

@ -6,10 +6,7 @@
from typing import Any
from llama_stack_api.common.errors import ModelNotFoundError
from llama_stack_api.datatypes import Api, RoutingTable
from llama_stack_api.models import Model
from llama_stack_api.resource import ResourceType
from llama_stack_api import Api, Model, ModelNotFoundError, ResourceType, RoutingTable
from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
from llama_stack.core.access_control.datatypes import Action

View file

@ -7,18 +7,18 @@
import uuid
from typing import Any
from llama_stack_api.common.errors import DatasetNotFoundError
from llama_stack_api.datasets import (
from llama_stack_api import (
Dataset,
DatasetNotFoundError,
DatasetPurpose,
Datasets,
DatasetType,
DataSource,
ListDatasetsResponse,
ResourceType,
RowsDataSource,
URIDataSource,
)
from llama_stack_api.resource import ResourceType
from llama_stack.core.datatypes import (
DatasetWithOwner,

View file

@ -7,10 +7,10 @@
import time
from typing import Any
from llama_stack_api.common.errors import ModelNotFoundError
from llama_stack_api.models import (
from llama_stack_api import (
ListModelsResponse,
Model,
ModelNotFoundError,
Models,
ModelType,
OpenAIListModelsResponse,

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import ParamType
from llama_stack_api.resource import ResourceType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
ListScoringFunctionsResponse,
ParamType,
ResourceType,
ScoringFn,
ScoringFnParams,
ScoringFunctions,

View file

@ -6,8 +6,7 @@
from typing import Any
from llama_stack_api.resource import ResourceType
from llama_stack_api.shields import ListShieldsResponse, Shield, Shields
from llama_stack_api import ListShieldsResponse, ResourceType, Shield, Shields
from llama_stack.core.datatypes import (
ShieldWithOwner,

View file

@ -6,9 +6,15 @@
from typing import Any
from llama_stack_api.common.content_types import URL
from llama_stack_api.common.errors import ToolGroupNotFoundError
from llama_stack_api.tools import ListToolDefsResponse, ListToolGroupsResponse, ToolDef, ToolGroup, ToolGroups
from llama_stack_api import (
URL,
ListToolDefsResponse,
ListToolGroupsResponse,
ToolDef,
ToolGroup,
ToolGroupNotFoundError,
ToolGroups,
)
from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner
from llama_stack.log import get_logger

View file

@ -6,12 +6,12 @@
from typing import Any
from llama_stack_api.common.errors import ModelNotFoundError, ModelTypeError
from llama_stack_api.models import ModelType
from llama_stack_api.resource import ResourceType
# Removed VectorStores import to avoid exposing public API
from llama_stack_api.vector_io import (
from llama_stack_api import (
ModelNotFoundError,
ModelType,
ModelTypeError,
ResourceType,
SearchRankingOptions,
VectorStoreChunkingStrategy,
VectorStoreDeleteResponse,

View file

@ -11,7 +11,7 @@ from urllib.parse import parse_qs, urljoin, urlparse
import httpx
import jwt
from llama_stack_api.common.errors import TokenValidationError
from llama_stack_api import TokenValidationError
from pydantic import BaseModel, Field
from llama_stack.core.datatypes import (

View file

@ -10,8 +10,7 @@ from collections.abc import Callable
from typing import Any
from aiohttp import hdrs
from llama_stack_api.datatypes import Api, ExternalApiSpec
from llama_stack_api.schema_utils import WebMethod
from llama_stack_api import Api, ExternalApiSpec, WebMethod
from starlette.routing import Route
from llama_stack.core.resolver import api_protocol_map

View file

@ -28,9 +28,7 @@ from fastapi import Path as FastapiPath
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError
from llama_stack_api.common.responses import PaginatedResponse
from llama_stack_api.datatypes import Api
from llama_stack_api import Api, ConflictError, PaginatedResponse, ResourceNotFoundError
from openai import BadRequestError
from pydantic import BaseModel, ValidationError

View file

@ -12,27 +12,30 @@ import tempfile
from typing import Any
import yaml
from llama_stack_api.agents import Agents
from llama_stack_api.batches import Batches
from llama_stack_api.benchmarks import Benchmarks
from llama_stack_api.conversations import Conversations
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import Api
from llama_stack_api.eval import Eval
from llama_stack_api.files import Files
from llama_stack_api.inference import Inference
from llama_stack_api.inspect import Inspect
from llama_stack_api.models import Models
from llama_stack_api.post_training import PostTraining
from llama_stack_api.prompts import Prompts
from llama_stack_api.providers import Providers
from llama_stack_api.safety import Safety
from llama_stack_api.scoring import Scoring
from llama_stack_api.scoring_functions import ScoringFunctions
from llama_stack_api.shields import Shields
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack_api import (
Agents,
Api,
Batches,
Benchmarks,
Conversations,
DatasetIO,
Datasets,
Eval,
Files,
Inference,
Inspect,
Models,
PostTraining,
Prompts,
Providers,
Safety,
Scoring,
ScoringFunctions,
Shields,
ToolGroups,
ToolRuntime,
VectorIO,
)
from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl
from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig

View file

@ -16,7 +16,7 @@ from typing import (
cast,
)
from llama_stack_api.schema_utils import json_schema_type, register_schema
from llama_stack_api import json_schema_type, register_schema
from opentelemetry import metrics, trace
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

View file

@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.models import ModelType
from llama_stack_api import ModelType
from llama_stack.core.datatypes import (
BuildProvider,

View file

@ -6,7 +6,7 @@
from pathlib import Path
from llama_stack_api.models import ModelType
from llama_stack_api import ModelType
from llama_stack.core.datatypes import (
BuildProvider,

View file

@ -5,8 +5,7 @@
# the root directory of this source tree.
from llama_stack_api.datasets import DatasetPurpose, URIDataSource
from llama_stack_api.models import ModelType
from llama_stack_api import DatasetPurpose, ModelType, URIDataSource
from llama_stack.core.datatypes import (
BenchmarkInput,

View file

@ -7,7 +7,7 @@
from typing import Any
from llama_stack_api.datatypes import RemoteProviderSpec
from llama_stack_api import RemoteProviderSpec
from llama_stack.core.datatypes import (
BuildProvider,

View file

@ -10,8 +10,7 @@ from typing import Any, Literal
import jinja2
import rich
import yaml
from llama_stack_api.datasets import DatasetPurpose
from llama_stack_api.models import ModelType
from llama_stack_api import DatasetPurpose, ModelType
from pydantic import BaseModel, Field
from llama_stack.core.datatypes import (

View file

@ -5,25 +5,25 @@
# the root directory of this source tree.
from llama_stack_api.agents import (
from llama_stack_api import (
Agents,
Conversations,
Inference,
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
OpenAIDeleteResponseObject,
OpenAIResponseInput,
OpenAIResponseInputTool,
OpenAIResponseObject,
OpenAIResponsePrompt,
OpenAIResponseText,
Order,
ResponseGuardrail,
Safety,
ToolGroups,
ToolRuntime,
VectorIO,
)
from llama_stack_api.conversations import Conversations
from llama_stack_api.inference import (
Inference,
)
from llama_stack_api.openai_responses import OpenAIResponsePrompt, OpenAIResponseText
from llama_stack_api.safety import Safety
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack.core.datatypes import AccessRule
from llama_stack.log import get_logger

View file

@ -8,20 +8,15 @@ import time
import uuid
from collections.abc import AsyncIterator
from llama_stack_api.agents import Order, ResponseGuardrailSpec
from llama_stack_api.common.errors import (
InvalidConversationIdError,
)
from llama_stack_api.conversations import ConversationItem, Conversations
from llama_stack_api.inference import (
from llama_stack_api import (
ConversationItem,
Conversations,
Inference,
OpenAIMessageParam,
OpenAISystemMessageParam,
)
from llama_stack_api.openai_responses import (
InvalidConversationIdError,
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
OpenAIDeleteResponseObject,
OpenAIMessageParam,
OpenAIResponseInput,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
@ -31,10 +26,14 @@ from llama_stack_api.openai_responses import (
OpenAIResponsePrompt,
OpenAIResponseText,
OpenAIResponseTextFormat,
OpenAISystemMessageParam,
Order,
ResponseGuardrailSpec,
Safety,
ToolGroups,
ToolRuntime,
VectorIO,
)
from llama_stack_api.safety import Safety
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from pydantic import BaseModel, TypeAdapter
from llama_stack.log import get_logger

View file

@ -8,8 +8,11 @@ import uuid
from collections.abc import AsyncIterator
from typing import Any
from llama_stack_api.inference import (
from llama_stack_api import (
AllowedToolsFilter,
ApprovalFilter,
Inference,
MCPListToolsTool,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionChunk,
@ -17,11 +20,6 @@ from llama_stack_api.inference import (
OpenAIChatCompletionToolCall,
OpenAIChoice,
OpenAIMessageParam,
)
from llama_stack_api.openai_responses import (
AllowedToolsFilter,
ApprovalFilter,
MCPListToolsTool,
OpenAIResponseContentPartOutputText,
OpenAIResponseContentPartReasoningText,
OpenAIResponseContentPartRefusal,
@ -1024,7 +1022,7 @@ class StreamingResponseOrchestrator:
self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
) -> AsyncIterator[OpenAIResponseObjectStream]:
"""Process all tools and emit appropriate streaming events."""
from llama_stack_api.tools import ToolDef
from llama_stack_api import ToolDef
from openai.types.chat import ChatCompletionToolParam
from llama_stack.models.llama.datatypes import ToolDefinition

View file

@ -9,18 +9,12 @@ import json
from collections.abc import AsyncIterator
from typing import Any
from llama_stack_api.common.content_types import (
from llama_stack_api import (
ImageContentItem,
TextContentItem,
)
from llama_stack_api.inference import (
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionToolCall,
OpenAIImageURL,
OpenAIToolMessageParam,
)
from llama_stack_api.openai_responses import (
OpenAIResponseInputToolFileSearch,
OpenAIResponseInputToolMCP,
OpenAIResponseObjectStreamResponseFileSearchCallCompleted,
@ -35,9 +29,13 @@ from llama_stack_api.openai_responses import (
OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFileSearchToolCallResults,
OpenAIResponseOutputMessageWebSearchToolCall,
OpenAIToolMessageParam,
TextContentItem,
ToolGroups,
ToolInvocationResult,
ToolRuntime,
VectorIO,
)
from llama_stack_api.tools import ToolGroups, ToolInvocationResult, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack.core.telemetry import tracing
from llama_stack.log import get_logger
@ -398,7 +396,7 @@ class ToolExecutor:
# Build output message
message: Any
if mcp_tool_to_server and function.name in mcp_tool_to_server:
from llama_stack_api.openai_responses import (
from llama_stack_api import (
OpenAIResponseOutputMessageMCPCall,
)

View file

@ -7,8 +7,10 @@
from dataclasses import dataclass
from typing import cast
from llama_stack_api.inference import OpenAIChatCompletionToolCall, OpenAIMessageParam, OpenAIResponseFormatParam
from llama_stack_api.openai_responses import (
from llama_stack_api import (
OpenAIChatCompletionToolCall,
OpenAIMessageParam,
OpenAIResponseFormatParam,
OpenAIResponseInput,
OpenAIResponseInputTool,
OpenAIResponseInputToolFileSearch,

View file

@ -9,8 +9,7 @@ import re
import uuid
from collections.abc import Sequence
from llama_stack_api.agents import ResponseGuardrailSpec
from llama_stack_api.inference import (
from llama_stack_api import (
OpenAIAssistantMessageParam,
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartParam,
@ -22,16 +21,11 @@ from llama_stack_api.inference import (
OpenAIImageURL,
OpenAIJSONSchema,
OpenAIMessageParam,
OpenAIResponseAnnotationFileCitation,
OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema,
OpenAIResponseFormatParam,
OpenAIResponseFormatText,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
OpenAIUserMessageParam,
)
from llama_stack_api.openai_responses import (
OpenAIResponseAnnotationFileCitation,
OpenAIResponseInput,
OpenAIResponseInputFunctionToolCallOutput,
OpenAIResponseInputMessageContent,
@ -47,8 +41,12 @@ from llama_stack_api.openai_responses import (
OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseText,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
OpenAIUserMessageParam,
ResponseGuardrailSpec,
Safety,
)
from llama_stack_api.safety import Safety
async def convert_chat_choice_to_response_message(

View file

@ -6,8 +6,7 @@
import asyncio
from llama_stack_api.inference import OpenAIMessageParam
from llama_stack_api.safety import Safety, SafetyViolation, ViolationLevel
from llama_stack_api import OpenAIMessageParam, Safety, SafetyViolation, ViolationLevel
from llama_stack.core.telemetry import tracing
from llama_stack.log import get_logger

View file

@ -6,9 +6,7 @@
from typing import Any
from llama_stack_api.files import Files
from llama_stack_api.inference import Inference
from llama_stack_api.models import Models
from llama_stack_api import Files, Inference, Models
from llama_stack.core.datatypes import AccessRule, Api
from llama_stack.providers.utils.kvstore import kvstore_impl

View file

@ -13,22 +13,26 @@ import uuid
from io import BytesIO
from typing import Any, Literal
from llama_stack_api.batches import Batches, BatchObject, ListBatchesResponse
from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError
from llama_stack_api.files import Files, OpenAIFilePurpose
from llama_stack_api.inference import (
from llama_stack_api import (
Batches,
BatchObject,
ConflictError,
Files,
Inference,
ListBatchesResponse,
Models,
OpenAIAssistantMessageParam,
OpenAIChatCompletionRequestWithExtraBody,
OpenAICompletionRequestWithExtraBody,
OpenAIDeveloperMessageParam,
OpenAIEmbeddingsRequestWithExtraBody,
OpenAIFilePurpose,
OpenAIMessageParam,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
OpenAIUserMessageParam,
ResourceNotFoundError,
)
from llama_stack_api.models import Models
from openai.types.batch import BatchError, Errors
from pydantic import BaseModel

View file

@ -5,10 +5,7 @@
# the root directory of this source tree.
from typing import Any
from llama_stack_api.common.responses import PaginatedResponse
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Dataset
from llama_stack_api.datatypes import DatasetsProtocolPrivate
from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
from llama_stack.providers.utils.kvstore import kvstore_impl

View file

@ -6,21 +6,24 @@
import json
from typing import Any
from llama_stack_api.agents import Agents
from llama_stack_api.benchmarks import Benchmark
from llama_stack_api.common.job_types import Job, JobStatus
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import BenchmarksProtocolPrivate
from llama_stack_api.eval import BenchmarkConfig, Eval, EvaluateResponse
from llama_stack_api.inference import (
from llama_stack_api import (
Agents,
Benchmark,
BenchmarkConfig,
BenchmarksProtocolPrivate,
DatasetIO,
Datasets,
Eval,
EvaluateResponse,
Inference,
Job,
JobStatus,
OpenAIChatCompletionRequestWithExtraBody,
OpenAICompletionRequestWithExtraBody,
OpenAISystemMessageParam,
OpenAIUserMessageParam,
Scoring,
)
from llama_stack_api.scoring import Scoring
from tqdm import tqdm
from llama_stack.providers.utils.common.data_schema_validator import ColumnName

View file

@ -10,15 +10,15 @@ from pathlib import Path
from typing import Annotated
from fastapi import Depends, File, Form, Response, UploadFile
from llama_stack_api.common.errors import ResourceNotFoundError
from llama_stack_api.common.responses import Order
from llama_stack_api.files import (
from llama_stack_api import (
ExpiresAfter,
Files,
ListOpenAIFileResponse,
OpenAIFileDeleteResponse,
OpenAIFileObject,
OpenAIFilePurpose,
Order,
ResourceNotFoundError,
)
from llama_stack.core.datatypes import AccessRule

View file

@ -6,7 +6,7 @@
from typing import Any
from llama_stack_api.inference import QuantizationConfig
from llama_stack_api import QuantizationConfig
from pydantic import BaseModel, field_validator
from llama_stack.providers.utils.inference import supported_inference_models

View file

@ -8,7 +8,7 @@ import math
from typing import Optional
import torch
from llama_stack_api.inference import (
from llama_stack_api import (
GreedySamplingStrategy,
JsonSchemaResponseFormat,
OpenAIChatCompletionRequestWithExtraBody,

View file

@ -9,9 +9,11 @@ import time
import uuid
from collections.abc import AsyncIterator
from llama_stack_api.datatypes import ModelsProtocolPrivate
from llama_stack_api.inference import (
from llama_stack_api import (
InferenceProvider,
Model,
ModelsProtocolPrivate,
ModelType,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionChunk,
@ -23,7 +25,6 @@ from llama_stack_api.inference import (
OpenAIUserMessageParam,
ToolChoice,
)
from llama_stack_api.models import Model, ModelType
from llama_stack.log import get_logger
from llama_stack.models.llama.datatypes import RawMessage, RawTextItem, ToolDefinition
@ -375,7 +376,7 @@ class MetaReferenceInferenceImpl(
# Convert tool calls to OpenAI format
openai_tool_calls = None
if decoded_message.tool_calls:
from llama_stack_api.inference import (
from llama_stack_api import (
OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction,
)
@ -440,7 +441,7 @@ class MetaReferenceInferenceImpl(
params: OpenAIChatCompletionRequestWithExtraBody,
) -> AsyncIterator[OpenAIChatCompletionChunk]:
"""Stream chat completion chunks as they're generated."""
from llama_stack_api.inference import (
from llama_stack_api import (
OpenAIChatCompletionChunk,
OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction,

View file

@ -6,16 +6,17 @@
from collections.abc import AsyncIterator
from llama_stack_api.datatypes import ModelsProtocolPrivate
from llama_stack_api.inference import (
from llama_stack_api import (
InferenceProvider,
Model,
ModelsProtocolPrivate,
ModelType,
OpenAIChatCompletion,
OpenAIChatCompletionChunk,
OpenAIChatCompletionRequestWithExtraBody,
OpenAICompletion,
OpenAICompletionRequestWithExtraBody,
)
from llama_stack_api.models import Model, ModelType
from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.embedding_mixin import (

View file

@ -12,11 +12,7 @@
from typing import Any
from llama_stack_api.common.type_system import (
ChatCompletionInputType,
DialogType,
StringType,
)
from llama_stack_api import ChatCompletionInputType, DialogType, StringType
from llama_stack.providers.utils.common.data_schema_validator import (
ColumnName,

View file

@ -6,11 +6,11 @@
from enum import Enum
from typing import Any
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
from llama_stack_api import (
AlgorithmConfig,
Checkpoint,
DatasetIO,
Datasets,
DPOAlignmentConfig,
JobStatus,
ListPostTrainingJobsResponse,

View file

@ -12,11 +12,11 @@ from typing import Any
import torch
from datasets import Dataset
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
from llama_stack_api import (
Checkpoint,
DataConfig,
DatasetIO,
Datasets,
LoraFinetuningConfig,
TrainingConfig,
)

View file

@ -11,10 +11,10 @@ from typing import Any
import torch
from datasets import Dataset
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
from llama_stack_api import (
Checkpoint,
DatasetIO,
Datasets,
DPOAlignmentConfig,
TrainingConfig,
)

View file

@ -14,8 +14,7 @@ from typing import TYPE_CHECKING, Any, Protocol
import psutil
import torch
from datasets import Dataset
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.post_training import Checkpoint, TrainingConfig
from llama_stack_api import Checkpoint, DatasetIO, TrainingConfig
from transformers import AutoConfig, AutoModelForCausalLM
if TYPE_CHECKING:

View file

@ -13,7 +13,7 @@
from collections.abc import Callable
import torch
from llama_stack_api.post_training import DatasetFormat
from llama_stack_api import DatasetFormat
from pydantic import BaseModel
from torchtune.data._messages import InputOutputToMessages, ShareGPTToMessages
from torchtune.models.llama3 import llama3_tokenizer

View file

@ -6,11 +6,11 @@
from enum import Enum
from typing import Any
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
from llama_stack_api import (
AlgorithmConfig,
Checkpoint,
DatasetIO,
Datasets,
DPOAlignmentConfig,
JobStatus,
ListPostTrainingJobsResponse,

View file

@ -12,14 +12,14 @@ from pathlib import Path
from typing import Any
import torch
from llama_stack_api.common.training_types import PostTrainingMetric
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
from llama_stack_api import (
Checkpoint,
DataConfig,
DatasetIO,
Datasets,
LoraFinetuningConfig,
OptimizerConfig,
PostTrainingMetric,
QATFinetuningConfig,
TrainingConfig,
)

View file

@ -10,16 +10,16 @@ from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from codeshield.cs import CodeShieldScanResult
from llama_stack_api.inference import OpenAIMessageParam
from llama_stack_api.safety import (
from llama_stack_api import (
ModerationObject,
ModerationObjectResults,
OpenAIMessageParam,
RunShieldResponse,
Safety,
SafetyViolation,
Shield,
ViolationLevel,
)
from llama_stack_api.shields import Shield
from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.prompt_adapter import (

View file

@ -9,23 +9,22 @@ import uuid
from string import Template
from typing import Any
from llama_stack_api.common.content_types import ImageContentItem, TextContentItem
from llama_stack_api.datatypes import ShieldsProtocolPrivate
from llama_stack_api.inference import (
from llama_stack_api import (
ImageContentItem,
Inference,
ModerationObject,
ModerationObjectResults,
OpenAIChatCompletionRequestWithExtraBody,
OpenAIMessageParam,
OpenAIUserMessageParam,
)
from llama_stack_api.safety import (
ModerationObject,
ModerationObjectResults,
RunShieldResponse,
Safety,
SafetyViolation,
Shield,
ShieldsProtocolPrivate,
TextContentItem,
ViolationLevel,
)
from llama_stack_api.shields import Shield
from llama_stack.core.datatypes import Api
from llama_stack.log import get_logger

View file

@ -7,17 +7,17 @@
from typing import Any
import torch
from llama_stack_api.datatypes import ShieldsProtocolPrivate
from llama_stack_api.inference import OpenAIMessageParam
from llama_stack_api.safety import (
from llama_stack_api import (
ModerationObject,
OpenAIMessageParam,
RunShieldResponse,
Safety,
SafetyViolation,
Shield,
ShieldsProtocolPrivate,
ShieldStore,
ViolationLevel,
)
from llama_stack_api.shields import Shield
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from llama_stack.core.utils.model_utils import model_local_dir

View file

@ -5,16 +5,17 @@
# the root directory of this source tree.
from typing import Any
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import ScoringFunctionsProtocolPrivate
from llama_stack_api.scoring import (
from llama_stack_api import (
DatasetIO,
Datasets,
ScoreBatchResponse,
ScoreResponse,
Scoring,
ScoringFn,
ScoringFnParams,
ScoringFunctionsProtocolPrivate,
ScoringResult,
)
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
from llama_stack.core.datatypes import Api
from llama_stack.providers.utils.common.data_schema_validator import (

View file

@ -8,8 +8,7 @@ import json
import re
from typing import Any
from llama_stack_api.scoring import ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -6,8 +6,7 @@
from typing import Any
from llama_stack_api.scoring import ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,9 +4,9 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
NumberType,
RegexParserScoringFnParams,
ScoringFn,
)

View file

@ -4,9 +4,9 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
NumberType,
RegexParserScoringFnParams,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -6,8 +6,7 @@
from typing import Any
from llama_stack_api.scoring import ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -5,8 +5,7 @@
# the root directory of this source tree.
from typing import Any
from llama_stack_api.scoring import ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams, ScoringFnParamsType
from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -6,8 +6,7 @@
import re
from typing import Any
from llama_stack_api.scoring import ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams, ScoringFnParamsType
from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -6,8 +6,7 @@
from typing import Any
from llama_stack_api.scoring import ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -17,17 +17,18 @@ from autoevals.ragas import (
ContextRelevancy,
Faithfulness,
)
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import ScoringFunctionsProtocolPrivate
from llama_stack_api.scoring import (
from llama_stack_api import (
DatasetIO,
Datasets,
ScoreBatchResponse,
ScoreResponse,
Scoring,
ScoringFn,
ScoringFnParams,
ScoringFunctionsProtocolPrivate,
ScoringResult,
ScoringResultRow,
)
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
from pydantic import BaseModel
from llama_stack.core.datatypes import Api

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType
from llama_stack_api.scoring_functions import (
from llama_stack_api import (
AggregationFunctionType,
BasicScoringFnParams,
NumberType,
ScoringFn,
)

View file

@ -5,17 +5,18 @@
# the root directory of this source tree.
from typing import Any
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import ScoringFunctionsProtocolPrivate
from llama_stack_api.inference import Inference
from llama_stack_api.scoring import (
from llama_stack_api import (
DatasetIO,
Datasets,
Inference,
ScoreBatchResponse,
ScoreResponse,
Scoring,
ScoringFn,
ScoringFnParams,
ScoringFunctionsProtocolPrivate,
ScoringResult,
)
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
from llama_stack.core.datatypes import Api
from llama_stack.providers.utils.common.data_schema_validator import (

Some files were not shown because too many files have changed in this diff Show more