refactor: enforce top-level imports for llama-stack-api

Enforce that all imports from llama-stack-api use the form:

from llama_stack_api import <symbol>

 This prevents external code from accessing internal package structure
 (e.g., llama_stack_api.agents, llama_stack_api.common.*) and establishes
 a clear public API boundary.

 Changes:
 - Export 400+ symbols from llama_stack_api/__init__.py
 - Include all API types, common utilities, and strong_typing helpers
 - Update files across src/llama_stack, docs/, tests/, scripts/
 - Convert all submodule imports to top-level imports
 - Ensure docs use the proper top-level import structure

 Addresses PR review feedback requiring explicit __all__ definition to
 prevent "peeking inside" the API package.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-13 14:03:30 -05:00
parent b7480e9c88
commit 2e5d1c8881
270 changed files with 1587 additions and 750 deletions

View file

@ -30,14 +30,14 @@ jobs:
activate-environment: true activate-environment: true
version: 0.7.6 version: 0.7.6
- name: Build Llama Stack Spec package - name: Build Llama Stack API package
working-directory: src/llama-stack-api working-directory: src/llama-stack-api
run: uv build run: uv build
- name: Build Llama Stack package - name: Build Llama Stack package
run: uv build run: uv build
- name: Install Llama Stack package (with spec from local build) - name: Install Llama Stack package (with api stubs from local build)
run: | run: |
uv pip install --find-links src/llama-stack-api/dist dist/*.whl uv pip install --find-links src/llama-stack-api/dist dist/*.whl

View file

@ -58,7 +58,7 @@ External APIs must expose a `available_providers()` function in their module tha
```python ```python
# llama_stack_api_weather/api.py # llama_stack_api_weather/api.py
from llama_stack_api.providers.datatypes import Api, InlineProviderSpec, ProviderSpec from llama_stack_api import Api, InlineProviderSpec, ProviderSpec
def available_providers() -> list[ProviderSpec]: def available_providers() -> list[ProviderSpec]:
@ -79,7 +79,7 @@ A Protocol class like so:
# llama_stack_api_weather/api.py # llama_stack_api_weather/api.py
from typing import Protocol from typing import Protocol
from llama_stack_api.schema_utils import webmethod from llama_stack_api import webmethod
class WeatherAPI(Protocol): class WeatherAPI(Protocol):
@ -151,13 +151,12 @@ __all__ = ["WeatherAPI", "available_providers"]
# llama-stack-api-weather/src/llama_stack_api_weather/weather.py # llama-stack-api-weather/src/llama_stack_api_weather/weather.py
from typing import Protocol from typing import Protocol
from llama_stack_api.providers.datatypes import ( from llama_stack_api import (
Api, Api,
ProviderSpec, ProviderSpec,
RemoteProviderSpec, RemoteProviderSpec,
webmethod,
) )
from llama_stack_api.schema_utils import webmethod
def available_providers() -> list[ProviderSpec]: def available_providers() -> list[ProviderSpec]:
return [ return [

View file

@ -153,7 +153,7 @@ description: |
Example using RAGQueryConfig with different search modes: Example using RAGQueryConfig with different search modes:
```python ```python
from llama_stack_api.rag_tool import RAGQueryConfig, RRFRanker, WeightedRanker from llama_stack_api import RAGQueryConfig, RRFRanker, WeightedRanker
# Vector search # Vector search
config = RAGQueryConfig(mode="vector", max_chunks=5) config = RAGQueryConfig(mode="vector", max_chunks=5)
@ -358,7 +358,7 @@ Two ranker types are supported:
Example using RAGQueryConfig with different search modes: Example using RAGQueryConfig with different search modes:
```python ```python
from llama_stack_api.rag_tool import RAGQueryConfig, RRFRanker, WeightedRanker from llama_stack_api import RAGQueryConfig, RRFRanker, WeightedRanker
# Vector search # Vector search
config = RAGQueryConfig(mode="vector", max_chunks=5) config = RAGQueryConfig(mode="vector", max_chunks=5)

View file

@ -16,7 +16,7 @@ import sys
import fire import fire
import ruamel.yaml as yaml import ruamel.yaml as yaml
from llama_stack_api.version import LLAMA_STACK_API_V1 # noqa: E402 from llama_stack_api import LLAMA_STACK_API_V1 # noqa: E402
from llama_stack.core.stack import LlamaStack # noqa: E402 from llama_stack.core.stack import LlamaStack # noqa: E402
from .pyopenapi.options import Options # noqa: E402 from .pyopenapi.options import Options # noqa: E402

View file

@ -16,27 +16,27 @@ from typing import Annotated, Any, Dict, get_args, get_origin, Set, Union
from fastapi import UploadFile from fastapi import UploadFile
from llama_stack_api.datatypes import Error from llama_stack_api import (
from llama_stack_api.strong_typing.core import JsonType Docstring,
from llama_stack_api.strong_typing.docstring import Docstring, parse_type Error,
from llama_stack_api.strong_typing.inspection import ( JsonSchemaGenerator,
JsonType,
Schema,
SchemaOptions,
get_schema_identifier,
is_generic_list, is_generic_list,
is_type_optional, is_type_optional,
is_type_union, is_type_union,
is_unwrapped_body_param, is_unwrapped_body_param,
json_dump_string,
object_to_json,
parse_type,
python_type_to_name,
register_schema,
unwrap_generic_list, unwrap_generic_list,
unwrap_optional_type, unwrap_optional_type,
unwrap_union_types, unwrap_union_types,
) )
from llama_stack_api.strong_typing.name import python_type_to_name
from llama_stack_api.strong_typing.schema import (
get_schema_identifier,
JsonSchemaGenerator,
register_schema,
Schema,
SchemaOptions,
)
from llama_stack_api.strong_typing.serialization import json_dump_string, object_to_json
from pydantic import BaseModel from pydantic import BaseModel
from .operations import ( from .operations import (

View file

@ -11,19 +11,21 @@ import typing
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
from llama_stack_api.version import LLAMA_STACK_API_V1, LLAMA_STACK_API_V1BETA, LLAMA_STACK_API_V1ALPHA
from termcolor import colored from termcolor import colored
from llama_stack_api.strong_typing.inspection import get_signature
from typing import get_origin, get_args from typing import get_origin, get_args
from fastapi import UploadFile from fastapi import UploadFile
from fastapi.params import File, Form from fastapi.params import File, Form
from typing import Annotated from typing import Annotated
from llama_stack_api.schema_utils import ExtraBodyField from llama_stack_api import (
ExtraBodyField,
LLAMA_STACK_API_V1,
LLAMA_STACK_API_V1ALPHA,
LLAMA_STACK_API_V1BETA,
get_signature,
)
def split_prefix( def split_prefix(

View file

@ -9,7 +9,7 @@ import enum
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, ClassVar, Dict, List, Optional, Union from typing import Any, ClassVar, Dict, List, Optional, Union
from llama_stack_api.strong_typing.schema import JsonType, Schema, StrictJsonType from llama_stack_api import JsonType, Schema, StrictJsonType
URL = str URL = str

View file

@ -11,8 +11,7 @@ from pathlib import Path
from typing import Any, List, Optional, TextIO, Union, get_type_hints, get_origin, get_args from typing import Any, List, Optional, TextIO, Union, get_type_hints, get_origin, get_args
from pydantic import BaseModel from pydantic import BaseModel
from llama_stack_api.strong_typing.schema import object_to_json, StrictJsonType from llama_stack_api import StrictJsonType, is_unwrapped_body_param, object_to_json
from llama_stack_api.strong_typing.inspection import is_unwrapped_body_param
from llama_stack.core.resolver import api_protocol_map from llama_stack.core.resolver import api_protocol_map
from .generator import Generator from .generator import Generator
@ -165,12 +164,12 @@ def _validate_api_delete_method_returns_none(method) -> str | None:
return "has no return type annotation" return "has no return type annotation"
return_type = hints['return'] return_type = hints['return']
# Allow OpenAI endpoints to return response objects since they follow OpenAI specification # Allow OpenAI endpoints to return response objects since they follow OpenAI specification
method_name = getattr(method, '__name__', '') method_name = getattr(method, '__name__', '')
if method_name.__contains__('openai_'): if method_name.__contains__('openai_'):
return None return None
if return_type is not None and return_type is not type(None): if return_type is not None and return_type is not type(None):
return "does not return None where None is mandatory" return "does not return None where None is mandatory"

View file

@ -14,7 +14,7 @@ import os
from pathlib import Path from pathlib import Path
import fire import fire
from llama_stack_api.common.errors import ModelNotFoundError from llama_stack_api import ModelNotFoundError
from llama_stack.models.llama.llama3.generation import Llama3 from llama_stack.models.llama.llama3.generation import Llama3
from llama_stack.models.llama.llama4.generation import Llama4 from llama_stack.models.llama.llama4.generation import Llama4

View file

@ -12,16 +12,860 @@ for Llama Stack. It is designed to be a lightweight dependency for external prov
and clients that need to interact with Llama Stack APIs without requiring the full and clients that need to interact with Llama Stack APIs without requiring the full
server implementation. server implementation.
Key components: All imports from this package MUST use the form:
- API modules (agents, inference, safety, etc.): Protocol definitions for all Llama Stack APIs from llama_stack_api import <symbol>
- datatypes: Core data types and provider specifications
- common: Common data types used across APIs Sub-module imports (e.g., from llama_stack_api.agents import Agents) are NOT supported
- strong_typing: Type system utilities and considered a code smell. All exported symbols are explicitly listed in __all__.
- schema_utils: Schema validation and utilities
""" """
__version__ = "0.1.0" __version__ = "0.4.0"
from . import common, datatypes, schema_utils, strong_typing # noqa: F401 # Import submodules for those who need them
from . import common, strong_typing # noqa: F401
__all__ = ["common", "datatypes", "schema_utils", "strong_typing"] # Import all public API symbols
from .agents import Agents, ResponseGuardrail, ResponseGuardrailSpec
from .batches import Batches, BatchObject, ListBatchesResponse
from .benchmarks import (
Benchmark,
BenchmarkInput,
Benchmarks,
CommonBenchmarkFields,
ListBenchmarksResponse,
)
# Import commonly used types from common submodule
from .common.content_types import (
URL,
ImageContentItem,
InterleavedContent,
InterleavedContentItem,
TextContentItem,
_URLOrData,
)
from .common.errors import (
ConflictError,
DatasetNotFoundError,
InvalidConversationIdError,
ModelNotFoundError,
ModelTypeError,
ResourceNotFoundError,
TokenValidationError,
ToolGroupNotFoundError,
UnsupportedModelError,
VectorStoreNotFoundError,
)
from .common.job_types import Job, JobStatus
from .common.responses import Order, PaginatedResponse
from .common.training_types import Checkpoint, PostTrainingMetric
from .common.type_system import (
ChatCompletionInputType,
CompletionInputType,
NumberType,
ParamType,
StringType,
)
from .conversations import (
Conversation,
ConversationDeletedResource,
ConversationItem,
ConversationItemCreateRequest,
ConversationItemDeletedResource,
ConversationItemInclude,
ConversationItemList,
ConversationMessage,
Conversations,
Metadata,
)
from .datasetio import DatasetIO, DatasetStore
from .datasets import (
CommonDatasetFields,
Dataset,
DatasetInput,
DatasetPurpose,
Datasets,
DatasetType,
DataSource,
ListDatasetsResponse,
RowsDataSource,
URIDataSource,
)
from .datatypes import (
Api,
BenchmarksProtocolPrivate,
DatasetsProtocolPrivate,
DynamicApiMeta,
Error,
ExternalApiSpec,
HealthResponse,
HealthStatus,
InlineProviderSpec,
ModelsProtocolPrivate,
ProviderSpec,
RemoteProviderConfig,
RemoteProviderSpec,
RoutingTable,
ScoringFunctionsProtocolPrivate,
ShieldsProtocolPrivate,
ToolGroupsProtocolPrivate,
VectorStoresProtocolPrivate,
)
from .eval import BenchmarkConfig, Eval, EvalCandidate, EvaluateResponse, ModelCandidate
from .files import (
ExpiresAfter,
Files,
ListOpenAIFileResponse,
OpenAIFileDeleteResponse,
OpenAIFileObject,
OpenAIFilePurpose,
)
from .inference import (
Bf16QuantizationConfig,
ChatCompletionResponseEventType,
CompletionRequest,
EmbeddingsResponse,
EmbeddingTaskType,
Fp8QuantizationConfig,
GrammarResponseFormat,
GreedySamplingStrategy,
Inference,
InferenceProvider,
Int4QuantizationConfig,
JsonSchemaResponseFormat,
ListOpenAIChatCompletionResponse,
LogProbConfig,
ModelStore,
OpenAIAssistantMessageParam,
OpenAIChatCompletion,
OpenAIChatCompletionChunk,
OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartParam,
OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionMessageContent,
OpenAIChatCompletionRequestWithExtraBody,
OpenAIChatCompletionTextOnlyMessageContent,
OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction,
OpenAIChatCompletionUsage,
OpenAIChatCompletionUsageCompletionTokensDetails,
OpenAIChatCompletionUsagePromptTokensDetails,
OpenAIChoice,
OpenAIChoiceDelta,
OpenAIChoiceLogprobs,
OpenAIChunkChoice,
OpenAICompletion,
OpenAICompletionChoice,
OpenAICompletionLogprobs,
OpenAICompletionRequestWithExtraBody,
OpenAICompletionWithInputMessages,
OpenAIDeveloperMessageParam,
OpenAIEmbeddingData,
OpenAIEmbeddingsRequestWithExtraBody,
OpenAIEmbeddingsResponse,
OpenAIEmbeddingUsage,
OpenAIFile,
OpenAIFileFile,
OpenAIImageURL,
OpenAIJSONSchema,
OpenAIMessageParam,
OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema,
OpenAIResponseFormatParam,
OpenAIResponseFormatText,
OpenAISystemMessageParam,
OpenAITokenLogProb,
OpenAIToolMessageParam,
OpenAITopLogProb,
OpenAIUserMessageParam,
QuantizationConfig,
QuantizationType,
RerankData,
RerankResponse,
ResponseFormat,
ResponseFormatType,
SamplingParams,
SamplingStrategy,
SystemMessage,
SystemMessageBehavior,
TextTruncation,
TokenLogProbs,
ToolChoice,
ToolResponseMessage,
TopKSamplingStrategy,
TopPSamplingStrategy,
UserMessage,
)
from .inspect import (
ApiFilter,
HealthInfo,
Inspect,
ListRoutesResponse,
RouteInfo,
VersionInfo,
)
from .models import (
CommonModelFields,
ListModelsResponse,
Model,
ModelInput,
Models,
ModelType,
OpenAIListModelsResponse,
OpenAIModel,
)
from .openai_responses import (
AllowedToolsFilter,
ApprovalFilter,
ListOpenAIResponseInputItem,
ListOpenAIResponseObject,
MCPListToolsTool,
OpenAIDeleteResponseObject,
OpenAIResponseAnnotationCitation,
OpenAIResponseAnnotationContainerFileCitation,
OpenAIResponseAnnotationFileCitation,
OpenAIResponseAnnotationFilePath,
OpenAIResponseAnnotations,
OpenAIResponseContentPart,
OpenAIResponseContentPartOutputText,
OpenAIResponseContentPartReasoningSummary,
OpenAIResponseContentPartReasoningText,
OpenAIResponseContentPartRefusal,
OpenAIResponseError,
OpenAIResponseInput,
OpenAIResponseInputFunctionToolCallOutput,
OpenAIResponseInputMessageContent,
OpenAIResponseInputMessageContentFile,
OpenAIResponseInputMessageContentImage,
OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool,
OpenAIResponseInputToolFileSearch,
OpenAIResponseInputToolFunction,
OpenAIResponseInputToolMCP,
OpenAIResponseInputToolWebSearch,
OpenAIResponseMCPApprovalRequest,
OpenAIResponseMCPApprovalResponse,
OpenAIResponseMessage,
OpenAIResponseObject,
OpenAIResponseObjectStream,
OpenAIResponseObjectStreamResponseCompleted,
OpenAIResponseObjectStreamResponseContentPartAdded,
OpenAIResponseObjectStreamResponseContentPartDone,
OpenAIResponseObjectStreamResponseCreated,
OpenAIResponseObjectStreamResponseFailed,
OpenAIResponseObjectStreamResponseFileSearchCallCompleted,
OpenAIResponseObjectStreamResponseFileSearchCallInProgress,
OpenAIResponseObjectStreamResponseFileSearchCallSearching,
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta,
OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone,
OpenAIResponseObjectStreamResponseIncomplete,
OpenAIResponseObjectStreamResponseInProgress,
OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta,
OpenAIResponseObjectStreamResponseMcpCallArgumentsDone,
OpenAIResponseObjectStreamResponseMcpCallCompleted,
OpenAIResponseObjectStreamResponseMcpCallFailed,
OpenAIResponseObjectStreamResponseMcpCallInProgress,
OpenAIResponseObjectStreamResponseMcpListToolsCompleted,
OpenAIResponseObjectStreamResponseMcpListToolsFailed,
OpenAIResponseObjectStreamResponseMcpListToolsInProgress,
OpenAIResponseObjectStreamResponseOutputItemAdded,
OpenAIResponseObjectStreamResponseOutputItemDone,
OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded,
OpenAIResponseObjectStreamResponseOutputTextDelta,
OpenAIResponseObjectStreamResponseOutputTextDone,
OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded,
OpenAIResponseObjectStreamResponseReasoningSummaryPartDone,
OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta,
OpenAIResponseObjectStreamResponseReasoningSummaryTextDone,
OpenAIResponseObjectStreamResponseReasoningTextDelta,
OpenAIResponseObjectStreamResponseReasoningTextDone,
OpenAIResponseObjectStreamResponseRefusalDelta,
OpenAIResponseObjectStreamResponseRefusalDone,
OpenAIResponseObjectStreamResponseWebSearchCallCompleted,
OpenAIResponseObjectStreamResponseWebSearchCallInProgress,
OpenAIResponseObjectStreamResponseWebSearchCallSearching,
OpenAIResponseObjectWithInput,
OpenAIResponseOutput,
OpenAIResponseOutputMessageContent,
OpenAIResponseOutputMessageContentOutputText,
OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFileSearchToolCallResults,
OpenAIResponseOutputMessageFunctionToolCall,
OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseOutputMessageWebSearchToolCall,
OpenAIResponsePrompt,
OpenAIResponseText,
OpenAIResponseTextFormat,
OpenAIResponseTool,
OpenAIResponseToolMCP,
OpenAIResponseUsage,
OpenAIResponseUsageInputTokensDetails,
OpenAIResponseUsageOutputTokensDetails,
WebSearchToolTypes,
)
from .post_training import (
AlgorithmConfig,
DataConfig,
DatasetFormat,
DPOAlignmentConfig,
DPOLossType,
EfficiencyConfig,
ListPostTrainingJobsResponse,
LoraFinetuningConfig,
OptimizerConfig,
OptimizerType,
PostTraining,
PostTrainingJob,
PostTrainingJobArtifactsResponse,
PostTrainingJobLogStream,
PostTrainingJobStatusResponse,
PostTrainingRLHFRequest,
QATFinetuningConfig,
RLHFAlgorithm,
TrainingConfig,
)
from .prompts import ListPromptsResponse, Prompt, Prompts
from .providers import ListProvidersResponse, ProviderInfo, Providers
from .rag_tool import (
DefaultRAGQueryGeneratorConfig,
LLMRAGQueryGeneratorConfig,
RAGDocument,
RAGQueryConfig,
RAGQueryGenerator,
RAGQueryGeneratorConfig,
RAGQueryResult,
RAGSearchMode,
Ranker,
RRFRanker,
WeightedRanker,
)
from .resource import Resource, ResourceType
from .safety import (
ModerationObject,
ModerationObjectResults,
RunShieldResponse,
Safety,
SafetyViolation,
ShieldStore,
ViolationLevel,
)
from .schema_utils import (
CallableT,
ExtraBodyField,
WebMethod,
json_schema_type,
register_schema,
webmethod,
)
from .scoring import (
ScoreBatchResponse,
ScoreResponse,
Scoring,
ScoringFunctionStore,
ScoringResult,
ScoringResultRow,
)
from .scoring_functions import (
AggregationFunctionType,
BasicScoringFnParams,
CommonScoringFnFields,
ListScoringFunctionsResponse,
LLMAsJudgeScoringFnParams,
RegexParserScoringFnParams,
ScoringFn,
ScoringFnInput,
ScoringFnParams,
ScoringFnParamsType,
ScoringFunctions,
)
from .shields import (
CommonShieldFields,
ListShieldsResponse,
Shield,
ShieldInput,
Shields,
)
# Import from strong_typing
from .strong_typing.core import JsonType
from .strong_typing.docstring import Docstring, parse_type
from .strong_typing.inspection import (
get_signature,
is_generic_list,
is_type_optional,
is_type_union,
is_unwrapped_body_param,
unwrap_generic_list,
unwrap_optional_type,
unwrap_union_types,
)
from .strong_typing.name import python_type_to_name
from .strong_typing.schema import (
JsonSchemaGenerator,
Schema,
SchemaOptions,
StrictJsonType,
get_schema_identifier,
)
from .strong_typing.serialization import json_dump_string, object_to_json
from .tools import (
ListToolDefsResponse,
ListToolGroupsResponse,
SpecialToolGroup,
ToolDef,
ToolGroup,
ToolGroupInput,
ToolGroups,
ToolInvocationResult,
ToolRuntime,
ToolStore,
)
from .vector_io import (
Chunk,
ChunkMetadata,
OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
OpenAICreateVectorStoreRequestWithExtraBody,
QueryChunksResponse,
SearchRankingOptions,
VectorIO,
VectorStoreChunkingStrategy,
VectorStoreChunkingStrategyAuto,
VectorStoreChunkingStrategyStatic,
VectorStoreChunkingStrategyStaticConfig,
VectorStoreContent,
VectorStoreCreateRequest,
VectorStoreDeleteResponse,
VectorStoreFileBatchObject,
VectorStoreFileContentResponse,
VectorStoreFileCounts,
VectorStoreFileDeleteResponse,
VectorStoreFileLastError,
VectorStoreFileObject,
VectorStoreFilesListInBatchResponse,
VectorStoreFileStatus,
VectorStoreListFilesResponse,
VectorStoreListResponse,
VectorStoreModifyRequest,
VectorStoreObject,
VectorStoreSearchRequest,
VectorStoreSearchResponse,
VectorStoreSearchResponsePage,
VectorStoreTable,
)
from .vector_stores import VectorStore, VectorStoreInput
from .version import (
LLAMA_STACK_API_V1,
LLAMA_STACK_API_V1ALPHA,
LLAMA_STACK_API_V1BETA,
)
__all__ = [
# Submodules
"common",
"strong_typing",
# Version constants
"LLAMA_STACK_API_V1",
"LLAMA_STACK_API_V1ALPHA",
"LLAMA_STACK_API_V1BETA",
# API Symbols
"Agents",
"AggregationFunctionType",
"AlgorithmConfig",
"AllowedToolsFilter",
"Api",
"ApiFilter",
"ApprovalFilter",
"BasicScoringFnParams",
"Batches",
"BatchObject",
"Benchmark",
"BenchmarkConfig",
"BenchmarkInput",
"Benchmarks",
"BenchmarksProtocolPrivate",
"Bf16QuantizationConfig",
"CallableT",
"ChatCompletionInputType",
"ChatCompletionResponseEventType",
"Checkpoint",
"Chunk",
"ChunkMetadata",
"CommonBenchmarkFields",
"ConflictError",
"CommonDatasetFields",
"CommonModelFields",
"CommonScoringFnFields",
"CommonShieldFields",
"CompletionInputType",
"CompletionRequest",
"Conversation",
"ConversationDeletedResource",
"ConversationItem",
"ConversationItemCreateRequest",
"ConversationItemDeletedResource",
"ConversationItemInclude",
"ConversationItemList",
"ConversationMessage",
"Conversations",
"DPOAlignmentConfig",
"DPOLossType",
"DataConfig",
"DataSource",
"Dataset",
"DatasetFormat",
"DatasetIO",
"DatasetInput",
"DatasetPurpose",
"DatasetNotFoundError",
"DatasetStore",
"DatasetType",
"Datasets",
"DatasetsProtocolPrivate",
"DefaultRAGQueryGeneratorConfig",
"Docstring",
"DynamicApiMeta",
"EfficiencyConfig",
"EmbeddingTaskType",
"EmbeddingsResponse",
"Error",
"Eval",
"EvalCandidate",
"EvaluateResponse",
"ExpiresAfter",
"ExternalApiSpec",
"ExtraBodyField",
"Files",
"Fp8QuantizationConfig",
"get_schema_identifier",
"get_signature",
"GrammarResponseFormat",
"GreedySamplingStrategy",
"HealthInfo",
"HealthResponse",
"HealthStatus",
"ImageContentItem",
"Inference",
"InferenceProvider",
"InlineProviderSpec",
"Inspect",
"Int4QuantizationConfig",
"InterleavedContent",
"InterleavedContentItem",
"InvalidConversationIdError",
"is_generic_list",
"is_type_optional",
"is_type_union",
"is_unwrapped_body_param",
"Job",
"JobStatus",
"json_dump_string",
"json_schema_type",
"JsonSchemaGenerator",
"JsonSchemaResponseFormat",
"JsonType",
"LLMAsJudgeScoringFnParams",
"LLMRAGQueryGeneratorConfig",
"ListBatchesResponse",
"ListBenchmarksResponse",
"ListDatasetsResponse",
"ListModelsResponse",
"ListOpenAIChatCompletionResponse",
"ListOpenAIFileResponse",
"ListOpenAIResponseInputItem",
"ListOpenAIResponseObject",
"ListPostTrainingJobsResponse",
"ListPromptsResponse",
"ListProvidersResponse",
"ListRoutesResponse",
"ListScoringFunctionsResponse",
"ListShieldsResponse",
"ListToolDefsResponse",
"ListToolGroupsResponse",
"LogProbConfig",
"LoraFinetuningConfig",
"MCPListToolsTool",
"Metadata",
"Model",
"ModelCandidate",
"ModelInput",
"ModelNotFoundError",
"ModelStore",
"ModelType",
"ModelTypeError",
"Models",
"ModelsProtocolPrivate",
"ModerationObject",
"ModerationObjectResults",
"NumberType",
"object_to_json",
"OpenAIAssistantMessageParam",
"OpenAIChatCompletion",
"OpenAIChatCompletionChunk",
"OpenAIChatCompletionContentPartImageParam",
"OpenAIChatCompletionContentPartParam",
"OpenAIChatCompletionContentPartTextParam",
"OpenAIChatCompletionMessageContent",
"OpenAIChatCompletionRequestWithExtraBody",
"OpenAIChatCompletionTextOnlyMessageContent",
"OpenAIChatCompletionToolCall",
"OpenAIChatCompletionToolCallFunction",
"OpenAIChatCompletionUsage",
"OpenAIChatCompletionUsageCompletionTokensDetails",
"OpenAIChatCompletionUsagePromptTokensDetails",
"OpenAIChoice",
"OpenAIChoiceDelta",
"OpenAIChoiceLogprobs",
"OpenAIChunkChoice",
"OpenAICompletion",
"OpenAICompletionChoice",
"OpenAICompletionLogprobs",
"OpenAICompletionRequestWithExtraBody",
"OpenAICompletionWithInputMessages",
"OpenAICreateVectorStoreFileBatchRequestWithExtraBody",
"OpenAICreateVectorStoreRequestWithExtraBody",
"OpenAIDeleteResponseObject",
"OpenAIDeveloperMessageParam",
"OpenAIEmbeddingData",
"OpenAIEmbeddingUsage",
"OpenAIEmbeddingsRequestWithExtraBody",
"OpenAIEmbeddingsResponse",
"OpenAIFile",
"OpenAIFileDeleteResponse",
"OpenAIFileFile",
"OpenAIFileObject",
"OpenAIFilePurpose",
"OpenAIImageURL",
"OpenAIJSONSchema",
"OpenAIListModelsResponse",
"OpenAIMessageParam",
"OpenAIModel",
"Order",
"OpenAIResponseAnnotationCitation",
"OpenAIResponseAnnotationContainerFileCitation",
"OpenAIResponseAnnotationFileCitation",
"OpenAIResponseAnnotationFilePath",
"OpenAIResponseAnnotations",
"OpenAIResponseContentPart",
"OpenAIResponseContentPartOutputText",
"OpenAIResponseContentPartReasoningSummary",
"OpenAIResponseContentPartReasoningText",
"OpenAIResponseContentPartRefusal",
"OpenAIResponseError",
"OpenAIResponseFormatJSONObject",
"OpenAIResponseFormatJSONSchema",
"OpenAIResponseFormatParam",
"OpenAIResponseFormatText",
"OpenAIResponseInput",
"OpenAIResponseInputFunctionToolCallOutput",
"OpenAIResponseInputMessageContent",
"OpenAIResponseInputMessageContentFile",
"OpenAIResponseInputMessageContentImage",
"OpenAIResponseInputMessageContentText",
"OpenAIResponseInputTool",
"OpenAIResponseInputToolFileSearch",
"OpenAIResponseInputToolFunction",
"OpenAIResponseInputToolMCP",
"OpenAIResponseInputToolWebSearch",
"OpenAIResponseMCPApprovalRequest",
"OpenAIResponseMCPApprovalResponse",
"OpenAIResponseMessage",
"OpenAIResponseObject",
"OpenAIResponseObjectStream",
"OpenAIResponseObjectStreamResponseCompleted",
"OpenAIResponseObjectStreamResponseContentPartAdded",
"OpenAIResponseObjectStreamResponseContentPartDone",
"OpenAIResponseObjectStreamResponseCreated",
"OpenAIResponseObjectStreamResponseFailed",
"OpenAIResponseObjectStreamResponseFileSearchCallCompleted",
"OpenAIResponseObjectStreamResponseFileSearchCallInProgress",
"OpenAIResponseObjectStreamResponseFileSearchCallSearching",
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDelta",
"OpenAIResponseObjectStreamResponseFunctionCallArgumentsDone",
"OpenAIResponseObjectStreamResponseInProgress",
"OpenAIResponseObjectStreamResponseIncomplete",
"OpenAIResponseObjectStreamResponseMcpCallArgumentsDelta",
"OpenAIResponseObjectStreamResponseMcpCallArgumentsDone",
"OpenAIResponseObjectStreamResponseMcpCallCompleted",
"OpenAIResponseObjectStreamResponseMcpCallFailed",
"OpenAIResponseObjectStreamResponseMcpCallInProgress",
"OpenAIResponseObjectStreamResponseMcpListToolsCompleted",
"OpenAIResponseObjectStreamResponseMcpListToolsFailed",
"OpenAIResponseObjectStreamResponseMcpListToolsInProgress",
"OpenAIResponseObjectStreamResponseOutputItemAdded",
"OpenAIResponseObjectStreamResponseOutputItemDone",
"OpenAIResponseObjectStreamResponseOutputTextAnnotationAdded",
"OpenAIResponseObjectStreamResponseOutputTextDelta",
"OpenAIResponseObjectStreamResponseOutputTextDone",
"OpenAIResponseObjectStreamResponseReasoningSummaryPartAdded",
"OpenAIResponseObjectStreamResponseReasoningSummaryPartDone",
"OpenAIResponseObjectStreamResponseReasoningSummaryTextDelta",
"OpenAIResponseObjectStreamResponseReasoningSummaryTextDone",
"OpenAIResponseObjectStreamResponseReasoningTextDelta",
"OpenAIResponseObjectStreamResponseReasoningTextDone",
"OpenAIResponseObjectStreamResponseRefusalDelta",
"OpenAIResponseObjectStreamResponseRefusalDone",
"OpenAIResponseObjectStreamResponseWebSearchCallCompleted",
"OpenAIResponseObjectStreamResponseWebSearchCallInProgress",
"OpenAIResponseObjectStreamResponseWebSearchCallSearching",
"OpenAIResponseObjectWithInput",
"OpenAIResponseOutput",
"OpenAIResponseOutputMessageContent",
"OpenAIResponseOutputMessageContentOutputText",
"OpenAIResponseOutputMessageFileSearchToolCall",
"OpenAIResponseOutputMessageFileSearchToolCallResults",
"OpenAIResponseOutputMessageFunctionToolCall",
"OpenAIResponseOutputMessageMCPCall",
"OpenAIResponseOutputMessageMCPListTools",
"OpenAIResponseOutputMessageWebSearchToolCall",
"OpenAIResponsePrompt",
"OpenAIResponseText",
"OpenAIResponseTextFormat",
"OpenAIResponseTool",
"OpenAIResponseToolMCP",
"OpenAIResponseUsage",
"OpenAIResponseUsageInputTokensDetails",
"OpenAIResponseUsageOutputTokensDetails",
"OpenAISystemMessageParam",
"OpenAITokenLogProb",
"OpenAIToolMessageParam",
"OpenAITopLogProb",
"OpenAIUserMessageParam",
"OptimizerConfig",
"OptimizerType",
"PaginatedResponse",
"ParamType",
"parse_type",
"PostTraining",
"PostTrainingMetric",
"PostTrainingJob",
"PostTrainingJobArtifactsResponse",
"PostTrainingJobLogStream",
"PostTrainingJobStatusResponse",
"PostTrainingRLHFRequest",
"Prompt",
"Prompts",
"ProviderInfo",
"ProviderSpec",
"Providers",
"python_type_to_name",
"QATFinetuningConfig",
"QuantizationConfig",
"QuantizationType",
"QueryChunksResponse",
"RAGDocument",
"RAGQueryConfig",
"RAGQueryGenerator",
"RAGQueryGeneratorConfig",
"RAGQueryResult",
"RAGSearchMode",
"register_schema",
"RLHFAlgorithm",
"RRFRanker",
"Ranker",
"RegexParserScoringFnParams",
"RemoteProviderConfig",
"RemoteProviderSpec",
"RerankData",
"RerankResponse",
"Resource",
"ResourceNotFoundError",
"ResourceType",
"ResponseFormat",
"ResponseFormatType",
"ResponseGuardrail",
"ResponseGuardrailSpec",
"RouteInfo",
"RoutingTable",
"RowsDataSource",
"RunShieldResponse",
"Safety",
"SafetyViolation",
"SamplingParams",
"SamplingStrategy",
"ScoreBatchResponse",
"ScoreResponse",
"Scoring",
"ScoringFn",
"ScoringFnInput",
"ScoringFnParams",
"ScoringFnParamsType",
"ScoringFunctionStore",
"ScoringFunctions",
"ScoringFunctionsProtocolPrivate",
"ScoringResult",
"ScoringResultRow",
"Schema",
"SchemaOptions",
"SearchRankingOptions",
"Shield",
"ShieldInput",
"ShieldStore",
"Shields",
"ShieldsProtocolPrivate",
"SpecialToolGroup",
"StrictJsonType",
"StringType",
"SystemMessage",
"SystemMessageBehavior",
"TextContentItem",
"TextTruncation",
"TokenLogProbs",
"TokenValidationError",
"ToolChoice",
"ToolGroupNotFoundError",
"ToolDef",
"ToolGroup",
"ToolGroupInput",
"ToolGroups",
"ToolGroupsProtocolPrivate",
"ToolInvocationResult",
"ToolResponseMessage",
"ToolRuntime",
"ToolStore",
"TopKSamplingStrategy",
"TopPSamplingStrategy",
"TrainingConfig",
"UnsupportedModelError",
"unwrap_generic_list",
"unwrap_optional_type",
"unwrap_union_types",
"URIDataSource",
"URL",
"_URLOrData",
"UserMessage",
"VectorIO",
"VectorStore",
"VectorStoreChunkingStrategy",
"VectorStoreChunkingStrategyAuto",
"VectorStoreChunkingStrategyStatic",
"VectorStoreChunkingStrategyStaticConfig",
"VectorStoreContent",
"VectorStoreCreateRequest",
"VectorStoreDeleteResponse",
"VectorStoreFileBatchObject",
"VectorStoreFileContentResponse",
"VectorStoreFileCounts",
"VectorStoreFileDeleteResponse",
"VectorStoreFileLastError",
"VectorStoreFileObject",
"VectorStoreFileStatus",
"VectorStoreFilesListInBatchResponse",
"VectorStoreInput",
"VectorStoreListFilesResponse",
"VectorStoreListResponse",
"VectorStoreModifyRequest",
"VectorStoreObject",
"VectorStoreSearchRequest",
"VectorStoreSearchResponse",
"VectorStoreSearchResponsePage",
"VectorStoreTable",
"VectorStoreNotFoundError",
"VectorStoresProtocolPrivate",
"VersionInfo",
"ViolationLevel",
"webmethod",
"WebMethod",
"WebSearchToolTypes",
"WeightedRanker",
]

View file

@ -9,7 +9,7 @@ import sys
from pathlib import Path from pathlib import Path
import yaml import yaml
from llama_stack_api.datatypes import Api from llama_stack_api import Api
from termcolor import cprint from termcolor import cprint
from llama_stack.cli.stack.utils import ImageType from llama_stack.cli.stack.utils import ImageType

View file

@ -11,7 +11,7 @@ from functools import lru_cache
from pathlib import Path from pathlib import Path
import yaml import yaml
from llama_stack_api.datatypes import Api from llama_stack_api import Api
from termcolor import cprint from termcolor import cprint
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (

View file

@ -6,7 +6,7 @@
import sys import sys
from llama_stack_api.datatypes import Api from llama_stack_api import Api
from pydantic import BaseModel from pydantic import BaseModel
from termcolor import cprint from termcolor import cprint

View file

@ -12,7 +12,7 @@ from enum import Enum
from typing import Any, Union, get_args, get_origin from typing import Any, Union, get_args, get_origin
import httpx import httpx
from llama_stack_api.datatypes import RemoteProviderConfig from llama_stack_api import RemoteProviderConfig
from pydantic import BaseModel, parse_obj_as from pydantic import BaseModel, parse_obj_as
from termcolor import cprint from termcolor import cprint

View file

@ -6,7 +6,7 @@
import textwrap import textwrap
from typing import Any from typing import Any
from llama_stack_api.datatypes import Api, ProviderSpec from llama_stack_api import Api, ProviderSpec
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
LLAMA_STACK_RUN_CONFIG_VERSION, LLAMA_STACK_RUN_CONFIG_VERSION,

View file

@ -8,7 +8,7 @@ import secrets
import time import time
from typing import Any, Literal from typing import Any, Literal
from llama_stack_api.conversations import ( from llama_stack_api import (
Conversation, Conversation,
ConversationDeletedResource, ConversationDeletedResource,
ConversationItem, ConversationItem,

View file

@ -9,21 +9,32 @@ from pathlib import Path
from typing import Annotated, Any, Literal, Self from typing import Annotated, Any, Literal, Self
from urllib.parse import urlparse from urllib.parse import urlparse
from llama_stack_api.benchmarks import Benchmark, BenchmarkInput from llama_stack_api import (
from llama_stack_api.datasetio import DatasetIO Api,
from llama_stack_api.datasets import Dataset, DatasetInput Benchmark,
from llama_stack_api.datatypes import Api, ProviderSpec BenchmarkInput,
from llama_stack_api.eval import Eval Dataset,
from llama_stack_api.inference import Inference DatasetInput,
from llama_stack_api.models import Model, ModelInput DatasetIO,
from llama_stack_api.resource import Resource Eval,
from llama_stack_api.safety import Safety Inference,
from llama_stack_api.scoring import Scoring Model,
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnInput ModelInput,
from llama_stack_api.shields import Shield, ShieldInput ProviderSpec,
from llama_stack_api.tools import ToolGroup, ToolGroupInput, ToolRuntime Resource,
from llama_stack_api.vector_io import VectorIO Safety,
from llama_stack_api.vector_stores import VectorStore, VectorStoreInput Scoring,
ScoringFn,
ScoringFnInput,
Shield,
ShieldInput,
ToolGroup,
ToolGroupInput,
ToolRuntime,
VectorIO,
VectorStore,
VectorStoreInput,
)
from pydantic import BaseModel, Field, field_validator, model_validator from pydantic import BaseModel, Field, field_validator, model_validator
from llama_stack.core.access_control.datatypes import AccessRule from llama_stack.core.access_control.datatypes import AccessRule

View file

@ -10,7 +10,7 @@ import os
from typing import Any from typing import Any
import yaml import yaml
from llama_stack_api.datatypes import ( from llama_stack_api import (
Api, Api,
InlineProviderSpec, InlineProviderSpec,
ProviderSpec, ProviderSpec,

View file

@ -6,7 +6,7 @@
import yaml import yaml
from llama_stack_api.datatypes import Api, ExternalApiSpec from llama_stack_api import Api, ExternalApiSpec
from llama_stack.core.datatypes import BuildConfig, StackRunConfig from llama_stack.core.datatypes import BuildConfig, StackRunConfig
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -6,9 +6,9 @@
from importlib.metadata import version from importlib.metadata import version
from llama_stack_api.datatypes import HealthStatus from llama_stack_api import (
from llama_stack_api.inspect import (
HealthInfo, HealthInfo,
HealthStatus,
Inspect, Inspect,
ListRoutesResponse, ListRoutesResponse,
RouteInfo, RouteInfo,

View file

@ -18,7 +18,7 @@ from typing import Any, TypeVar, Union, get_args, get_origin
import httpx import httpx
import yaml import yaml
from fastapi import Response as FastAPIResponse from fastapi import Response as FastAPIResponse
from llama_stack_api.strong_typing.inspection import is_unwrapped_body_param from llama_stack_api import is_unwrapped_body_param
try: try:
from llama_stack_client import ( from llama_stack_client import (

View file

@ -7,7 +7,7 @@
import json import json
from typing import Any from typing import Any
from llama_stack_api.prompts import ListPromptsResponse, Prompt, Prompts from llama_stack_api import ListPromptsResponse, Prompt, Prompts
from pydantic import BaseModel from pydantic import BaseModel
from llama_stack.core.datatypes import StackRunConfig from llama_stack.core.datatypes import StackRunConfig

View file

@ -7,8 +7,7 @@
import asyncio import asyncio
from typing import Any from typing import Any
from llama_stack_api.datatypes import HealthResponse, HealthStatus from llama_stack_api import HealthResponse, HealthStatus, ListProvidersResponse, ProviderInfo, Providers
from llama_stack_api.providers import ListProvidersResponse, ProviderInfo, Providers
from pydantic import BaseModel from pydantic import BaseModel
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -8,41 +8,45 @@ import importlib.metadata
import inspect import inspect
from typing import Any from typing import Any
from llama_stack_api.agents import Agents from llama_stack_api import (
from llama_stack_api.batches import Batches LLAMA_STACK_API_V1ALPHA,
from llama_stack_api.benchmarks import Benchmarks Agents,
from llama_stack_api.conversations import Conversations
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.datatypes import (
Api, Api,
Batches,
Benchmarks,
BenchmarksProtocolPrivate, BenchmarksProtocolPrivate,
Conversations,
DatasetIO,
Datasets,
DatasetsProtocolPrivate, DatasetsProtocolPrivate,
Eval,
ExternalApiSpec, ExternalApiSpec,
Files,
Inference,
InferenceProvider,
Inspect,
Models,
ModelsProtocolPrivate, ModelsProtocolPrivate,
PostTraining,
Prompts,
ProviderSpec, ProviderSpec,
RemoteProviderConfig, RemoteProviderConfig,
RemoteProviderSpec, RemoteProviderSpec,
Safety,
Scoring,
ScoringFunctions,
ScoringFunctionsProtocolPrivate, ScoringFunctionsProtocolPrivate,
Shields,
ShieldsProtocolPrivate, ShieldsProtocolPrivate,
ToolGroups,
ToolGroupsProtocolPrivate, ToolGroupsProtocolPrivate,
ToolRuntime,
VectorIO,
VectorStore,
)
from llama_stack_api import (
Providers as ProvidersAPI,
) )
from llama_stack_api.eval import Eval
from llama_stack_api.files import Files
from llama_stack_api.inference import Inference, InferenceProvider
from llama_stack_api.inspect import Inspect
from llama_stack_api.models import Models
from llama_stack_api.post_training import PostTraining
from llama_stack_api.prompts import Prompts
from llama_stack_api.providers import Providers as ProvidersAPI
from llama_stack_api.safety import Safety
from llama_stack_api.scoring import Scoring
from llama_stack_api.scoring_functions import ScoringFunctions
from llama_stack_api.shields import Shields
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack_api.vector_stores import VectorStore
from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA
from llama_stack.core.client import get_client_impl from llama_stack.core.client import get_client_impl
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (

View file

@ -6,7 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.datatypes import Api, RoutingTable from llama_stack_api import Api, RoutingTable
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
AccessRule, AccessRule,

View file

@ -6,10 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.common.responses import PaginatedResponse from llama_stack_api import DatasetIO, DatasetPurpose, DataSource, PaginatedResponse, RoutingTable
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import DatasetPurpose, DataSource
from llama_stack_api.datatypes import RoutingTable
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -6,9 +6,12 @@
from typing import Any from typing import Any
from llama_stack_api.datatypes import RoutingTable from llama_stack_api import (
from llama_stack_api.eval import BenchmarkConfig, Eval, EvaluateResponse, Job BenchmarkConfig,
from llama_stack_api.scoring import ( Eval,
EvaluateResponse,
Job,
RoutingTable,
ScoreBatchResponse, ScoreBatchResponse,
ScoreResponse, ScoreResponse,
Scoring, Scoring,

View file

@ -11,11 +11,14 @@ from datetime import UTC, datetime
from typing import Annotated, Any from typing import Annotated, Any
from fastapi import Body from fastapi import Body
from llama_stack_api.common.errors import ModelNotFoundError, ModelTypeError from llama_stack_api import (
from llama_stack_api.datatypes import HealthResponse, HealthStatus, RoutingTable HealthResponse,
from llama_stack_api.inference import ( HealthStatus,
Inference, Inference,
ListOpenAIChatCompletionResponse, ListOpenAIChatCompletionResponse,
ModelNotFoundError,
ModelType,
ModelTypeError,
OpenAIAssistantMessageParam, OpenAIAssistantMessageParam,
OpenAIChatCompletion, OpenAIChatCompletion,
OpenAIChatCompletionChunk, OpenAIChatCompletionChunk,
@ -34,8 +37,8 @@ from llama_stack_api.inference import (
OpenAIMessageParam, OpenAIMessageParam,
Order, Order,
RerankResponse, RerankResponse,
RoutingTable,
) )
from llama_stack_api.models import ModelType
from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam
from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam
from pydantic import TypeAdapter from pydantic import TypeAdapter

View file

@ -6,10 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.datatypes import RoutingTable from llama_stack_api import ModerationObject, OpenAIMessageParam, RoutingTable, RunShieldResponse, Safety, Shield
from llama_stack_api.inference import OpenAIMessageParam
from llama_stack_api.safety import ModerationObject, RunShieldResponse, Safety
from llama_stack_api.shields import Shield
from llama_stack.core.datatypes import SafetyConfig from llama_stack.core.datatypes import SafetyConfig
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -6,10 +6,8 @@
from typing import Any from typing import Any
from llama_stack_api.common.content_types import ( from llama_stack_api import (
URL, URL,
)
from llama_stack_api.tools import (
ListToolDefsResponse, ListToolDefsResponse,
ToolRuntime, ToolRuntime,
) )

View file

@ -9,14 +9,16 @@ import uuid
from typing import Annotated, Any from typing import Annotated, Any
from fastapi import Body from fastapi import Body
from llama_stack_api.common.content_types import InterleavedContent from llama_stack_api import (
from llama_stack_api.datatypes import HealthResponse, HealthStatus, RoutingTable
from llama_stack_api.models import ModelType
from llama_stack_api.vector_io import (
Chunk, Chunk,
HealthResponse,
HealthStatus,
InterleavedContent,
ModelType,
OpenAICreateVectorStoreFileBatchRequestWithExtraBody, OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
OpenAICreateVectorStoreRequestWithExtraBody, OpenAICreateVectorStoreRequestWithExtraBody,
QueryChunksResponse, QueryChunksResponse,
RoutingTable,
SearchRankingOptions, SearchRankingOptions,
VectorIO, VectorIO,
VectorStoreChunkingStrategy, VectorStoreChunkingStrategy,

View file

@ -6,7 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse from llama_stack_api import Benchmark, Benchmarks, ListBenchmarksResponse
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
BenchmarkWithOwner, BenchmarkWithOwner,

View file

@ -6,10 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.common.errors import ModelNotFoundError from llama_stack_api import Api, Model, ModelNotFoundError, ResourceType, RoutingTable
from llama_stack_api.datatypes import Api, RoutingTable
from llama_stack_api.models import Model
from llama_stack_api.resource import ResourceType
from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
from llama_stack.core.access_control.datatypes import Action from llama_stack.core.access_control.datatypes import Action

View file

@ -7,18 +7,18 @@
import uuid import uuid
from typing import Any from typing import Any
from llama_stack_api.common.errors import DatasetNotFoundError from llama_stack_api import (
from llama_stack_api.datasets import (
Dataset, Dataset,
DatasetNotFoundError,
DatasetPurpose, DatasetPurpose,
Datasets, Datasets,
DatasetType, DatasetType,
DataSource, DataSource,
ListDatasetsResponse, ListDatasetsResponse,
ResourceType,
RowsDataSource, RowsDataSource,
URIDataSource, URIDataSource,
) )
from llama_stack_api.resource import ResourceType
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
DatasetWithOwner, DatasetWithOwner,

View file

@ -7,10 +7,10 @@
import time import time
from typing import Any from typing import Any
from llama_stack_api.common.errors import ModelNotFoundError from llama_stack_api import (
from llama_stack_api.models import (
ListModelsResponse, ListModelsResponse,
Model, Model,
ModelNotFoundError,
Models, Models,
ModelType, ModelType,
OpenAIListModelsResponse, OpenAIListModelsResponse,

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import ParamType from llama_stack_api import (
from llama_stack_api.resource import ResourceType
from llama_stack_api.scoring_functions import (
ListScoringFunctionsResponse, ListScoringFunctionsResponse,
ParamType,
ResourceType,
ScoringFn, ScoringFn,
ScoringFnParams, ScoringFnParams,
ScoringFunctions, ScoringFunctions,

View file

@ -6,8 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.resource import ResourceType from llama_stack_api import ListShieldsResponse, ResourceType, Shield, Shields
from llama_stack_api.shields import ListShieldsResponse, Shield, Shields
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
ShieldWithOwner, ShieldWithOwner,

View file

@ -6,9 +6,15 @@
from typing import Any from typing import Any
from llama_stack_api.common.content_types import URL from llama_stack_api import (
from llama_stack_api.common.errors import ToolGroupNotFoundError URL,
from llama_stack_api.tools import ListToolDefsResponse, ListToolGroupsResponse, ToolDef, ToolGroup, ToolGroups ListToolDefsResponse,
ListToolGroupsResponse,
ToolDef,
ToolGroup,
ToolGroupNotFoundError,
ToolGroups,
)
from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -6,12 +6,12 @@
from typing import Any from typing import Any
from llama_stack_api.common.errors import ModelNotFoundError, ModelTypeError
from llama_stack_api.models import ModelType
from llama_stack_api.resource import ResourceType
# Removed VectorStores import to avoid exposing public API # Removed VectorStores import to avoid exposing public API
from llama_stack_api.vector_io import ( from llama_stack_api import (
ModelNotFoundError,
ModelType,
ModelTypeError,
ResourceType,
SearchRankingOptions, SearchRankingOptions,
VectorStoreChunkingStrategy, VectorStoreChunkingStrategy,
VectorStoreDeleteResponse, VectorStoreDeleteResponse,

View file

@ -11,7 +11,7 @@ from urllib.parse import parse_qs, urljoin, urlparse
import httpx import httpx
import jwt import jwt
from llama_stack_api.common.errors import TokenValidationError from llama_stack_api import TokenValidationError
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (

View file

@ -10,8 +10,7 @@ from collections.abc import Callable
from typing import Any from typing import Any
from aiohttp import hdrs from aiohttp import hdrs
from llama_stack_api.datatypes import Api, ExternalApiSpec from llama_stack_api import Api, ExternalApiSpec, WebMethod
from llama_stack_api.schema_utils import WebMethod
from starlette.routing import Route from starlette.routing import Route
from llama_stack.core.resolver import api_protocol_map from llama_stack.core.resolver import api_protocol_map

View file

@ -28,9 +28,7 @@ from fastapi import Path as FastapiPath
from fastapi.exceptions import RequestValidationError from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse from fastapi.responses import JSONResponse, StreamingResponse
from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError from llama_stack_api import Api, ConflictError, PaginatedResponse, ResourceNotFoundError
from llama_stack_api.common.responses import PaginatedResponse
from llama_stack_api.datatypes import Api
from openai import BadRequestError from openai import BadRequestError
from pydantic import BaseModel, ValidationError from pydantic import BaseModel, ValidationError

View file

@ -12,27 +12,30 @@ import tempfile
from typing import Any from typing import Any
import yaml import yaml
from llama_stack_api.agents import Agents from llama_stack_api import (
from llama_stack_api.batches import Batches Agents,
from llama_stack_api.benchmarks import Benchmarks Api,
from llama_stack_api.conversations import Conversations Batches,
from llama_stack_api.datasetio import DatasetIO Benchmarks,
from llama_stack_api.datasets import Datasets Conversations,
from llama_stack_api.datatypes import Api DatasetIO,
from llama_stack_api.eval import Eval Datasets,
from llama_stack_api.files import Files Eval,
from llama_stack_api.inference import Inference Files,
from llama_stack_api.inspect import Inspect Inference,
from llama_stack_api.models import Models Inspect,
from llama_stack_api.post_training import PostTraining Models,
from llama_stack_api.prompts import Prompts PostTraining,
from llama_stack_api.providers import Providers Prompts,
from llama_stack_api.safety import Safety Providers,
from llama_stack_api.scoring import Scoring Safety,
from llama_stack_api.scoring_functions import ScoringFunctions Scoring,
from llama_stack_api.shields import Shields ScoringFunctions,
from llama_stack_api.tools import ToolGroups, ToolRuntime Shields,
from llama_stack_api.vector_io import VectorIO ToolGroups,
ToolRuntime,
VectorIO,
)
from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl
from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig

View file

@ -16,7 +16,7 @@ from typing import (
cast, cast,
) )
from llama_stack_api.schema_utils import json_schema_type, register_schema from llama_stack_api import json_schema_type, register_schema
from opentelemetry import metrics, trace from opentelemetry import metrics, trace
from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter

View file

@ -4,7 +4,7 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.models import ModelType from llama_stack_api import ModelType
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
BuildProvider, BuildProvider,

View file

@ -6,7 +6,7 @@
from pathlib import Path from pathlib import Path
from llama_stack_api.models import ModelType from llama_stack_api import ModelType
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
BuildProvider, BuildProvider,

View file

@ -5,8 +5,7 @@
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.datasets import DatasetPurpose, URIDataSource from llama_stack_api import DatasetPurpose, ModelType, URIDataSource
from llama_stack_api.models import ModelType
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
BenchmarkInput, BenchmarkInput,

View file

@ -7,7 +7,7 @@
from typing import Any from typing import Any
from llama_stack_api.datatypes import RemoteProviderSpec from llama_stack_api import RemoteProviderSpec
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (
BuildProvider, BuildProvider,

View file

@ -10,8 +10,7 @@ from typing import Any, Literal
import jinja2 import jinja2
import rich import rich
import yaml import yaml
from llama_stack_api.datasets import DatasetPurpose from llama_stack_api import DatasetPurpose, ModelType
from llama_stack_api.models import ModelType
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from llama_stack.core.datatypes import ( from llama_stack.core.datatypes import (

View file

@ -5,25 +5,25 @@
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.agents import ( from llama_stack_api import (
Agents, Agents,
Conversations,
Inference,
ListOpenAIResponseInputItem, ListOpenAIResponseInputItem,
ListOpenAIResponseObject, ListOpenAIResponseObject,
OpenAIDeleteResponseObject, OpenAIDeleteResponseObject,
OpenAIResponseInput, OpenAIResponseInput,
OpenAIResponseInputTool, OpenAIResponseInputTool,
OpenAIResponseObject, OpenAIResponseObject,
OpenAIResponsePrompt,
OpenAIResponseText,
Order, Order,
ResponseGuardrail, ResponseGuardrail,
Safety,
ToolGroups,
ToolRuntime,
VectorIO,
) )
from llama_stack_api.conversations import Conversations
from llama_stack_api.inference import (
Inference,
)
from llama_stack_api.openai_responses import OpenAIResponsePrompt, OpenAIResponseText
from llama_stack_api.safety import Safety
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack.core.datatypes import AccessRule from llama_stack.core.datatypes import AccessRule
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -8,20 +8,15 @@ import time
import uuid import uuid
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
from llama_stack_api.agents import Order, ResponseGuardrailSpec from llama_stack_api import (
from llama_stack_api.common.errors import ( ConversationItem,
InvalidConversationIdError, Conversations,
)
from llama_stack_api.conversations import ConversationItem, Conversations
from llama_stack_api.inference import (
Inference, Inference,
OpenAIMessageParam, InvalidConversationIdError,
OpenAISystemMessageParam,
)
from llama_stack_api.openai_responses import (
ListOpenAIResponseInputItem, ListOpenAIResponseInputItem,
ListOpenAIResponseObject, ListOpenAIResponseObject,
OpenAIDeleteResponseObject, OpenAIDeleteResponseObject,
OpenAIMessageParam,
OpenAIResponseInput, OpenAIResponseInput,
OpenAIResponseInputMessageContentText, OpenAIResponseInputMessageContentText,
OpenAIResponseInputTool, OpenAIResponseInputTool,
@ -31,10 +26,14 @@ from llama_stack_api.openai_responses import (
OpenAIResponsePrompt, OpenAIResponsePrompt,
OpenAIResponseText, OpenAIResponseText,
OpenAIResponseTextFormat, OpenAIResponseTextFormat,
OpenAISystemMessageParam,
Order,
ResponseGuardrailSpec,
Safety,
ToolGroups,
ToolRuntime,
VectorIO,
) )
from llama_stack_api.safety import Safety
from llama_stack_api.tools import ToolGroups, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from pydantic import BaseModel, TypeAdapter from pydantic import BaseModel, TypeAdapter
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -8,8 +8,11 @@ import uuid
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
from typing import Any from typing import Any
from llama_stack_api.inference import ( from llama_stack_api import (
AllowedToolsFilter,
ApprovalFilter,
Inference, Inference,
MCPListToolsTool,
OpenAIAssistantMessageParam, OpenAIAssistantMessageParam,
OpenAIChatCompletion, OpenAIChatCompletion,
OpenAIChatCompletionChunk, OpenAIChatCompletionChunk,
@ -17,11 +20,6 @@ from llama_stack_api.inference import (
OpenAIChatCompletionToolCall, OpenAIChatCompletionToolCall,
OpenAIChoice, OpenAIChoice,
OpenAIMessageParam, OpenAIMessageParam,
)
from llama_stack_api.openai_responses import (
AllowedToolsFilter,
ApprovalFilter,
MCPListToolsTool,
OpenAIResponseContentPartOutputText, OpenAIResponseContentPartOutputText,
OpenAIResponseContentPartReasoningText, OpenAIResponseContentPartReasoningText,
OpenAIResponseContentPartRefusal, OpenAIResponseContentPartRefusal,
@ -1024,7 +1022,7 @@ class StreamingResponseOrchestrator:
self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput] self, tools: list[OpenAIResponseInputTool], output_messages: list[OpenAIResponseOutput]
) -> AsyncIterator[OpenAIResponseObjectStream]: ) -> AsyncIterator[OpenAIResponseObjectStream]:
"""Process all tools and emit appropriate streaming events.""" """Process all tools and emit appropriate streaming events."""
from llama_stack_api.tools import ToolDef from llama_stack_api import ToolDef
from openai.types.chat import ChatCompletionToolParam from openai.types.chat import ChatCompletionToolParam
from llama_stack.models.llama.datatypes import ToolDefinition from llama_stack.models.llama.datatypes import ToolDefinition

View file

@ -9,18 +9,12 @@ import json
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
from typing import Any from typing import Any
from llama_stack_api.common.content_types import ( from llama_stack_api import (
ImageContentItem, ImageContentItem,
TextContentItem,
)
from llama_stack_api.inference import (
OpenAIChatCompletionContentPartImageParam, OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartTextParam, OpenAIChatCompletionContentPartTextParam,
OpenAIChatCompletionToolCall, OpenAIChatCompletionToolCall,
OpenAIImageURL, OpenAIImageURL,
OpenAIToolMessageParam,
)
from llama_stack_api.openai_responses import (
OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFileSearch,
OpenAIResponseInputToolMCP, OpenAIResponseInputToolMCP,
OpenAIResponseObjectStreamResponseFileSearchCallCompleted, OpenAIResponseObjectStreamResponseFileSearchCallCompleted,
@ -35,9 +29,13 @@ from llama_stack_api.openai_responses import (
OpenAIResponseOutputMessageFileSearchToolCall, OpenAIResponseOutputMessageFileSearchToolCall,
OpenAIResponseOutputMessageFileSearchToolCallResults, OpenAIResponseOutputMessageFileSearchToolCallResults,
OpenAIResponseOutputMessageWebSearchToolCall, OpenAIResponseOutputMessageWebSearchToolCall,
OpenAIToolMessageParam,
TextContentItem,
ToolGroups,
ToolInvocationResult,
ToolRuntime,
VectorIO,
) )
from llama_stack_api.tools import ToolGroups, ToolInvocationResult, ToolRuntime
from llama_stack_api.vector_io import VectorIO
from llama_stack.core.telemetry import tracing from llama_stack.core.telemetry import tracing
from llama_stack.log import get_logger from llama_stack.log import get_logger
@ -398,7 +396,7 @@ class ToolExecutor:
# Build output message # Build output message
message: Any message: Any
if mcp_tool_to_server and function.name in mcp_tool_to_server: if mcp_tool_to_server and function.name in mcp_tool_to_server:
from llama_stack_api.openai_responses import ( from llama_stack_api import (
OpenAIResponseOutputMessageMCPCall, OpenAIResponseOutputMessageMCPCall,
) )

View file

@ -7,8 +7,10 @@
from dataclasses import dataclass from dataclasses import dataclass
from typing import cast from typing import cast
from llama_stack_api.inference import OpenAIChatCompletionToolCall, OpenAIMessageParam, OpenAIResponseFormatParam from llama_stack_api import (
from llama_stack_api.openai_responses import ( OpenAIChatCompletionToolCall,
OpenAIMessageParam,
OpenAIResponseFormatParam,
OpenAIResponseInput, OpenAIResponseInput,
OpenAIResponseInputTool, OpenAIResponseInputTool,
OpenAIResponseInputToolFileSearch, OpenAIResponseInputToolFileSearch,

View file

@ -9,8 +9,7 @@ import re
import uuid import uuid
from collections.abc import Sequence from collections.abc import Sequence
from llama_stack_api.agents import ResponseGuardrailSpec from llama_stack_api import (
from llama_stack_api.inference import (
OpenAIAssistantMessageParam, OpenAIAssistantMessageParam,
OpenAIChatCompletionContentPartImageParam, OpenAIChatCompletionContentPartImageParam,
OpenAIChatCompletionContentPartParam, OpenAIChatCompletionContentPartParam,
@ -22,16 +21,11 @@ from llama_stack_api.inference import (
OpenAIImageURL, OpenAIImageURL,
OpenAIJSONSchema, OpenAIJSONSchema,
OpenAIMessageParam, OpenAIMessageParam,
OpenAIResponseAnnotationFileCitation,
OpenAIResponseFormatJSONObject, OpenAIResponseFormatJSONObject,
OpenAIResponseFormatJSONSchema, OpenAIResponseFormatJSONSchema,
OpenAIResponseFormatParam, OpenAIResponseFormatParam,
OpenAIResponseFormatText, OpenAIResponseFormatText,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
OpenAIUserMessageParam,
)
from llama_stack_api.openai_responses import (
OpenAIResponseAnnotationFileCitation,
OpenAIResponseInput, OpenAIResponseInput,
OpenAIResponseInputFunctionToolCallOutput, OpenAIResponseInputFunctionToolCallOutput,
OpenAIResponseInputMessageContent, OpenAIResponseInputMessageContent,
@ -47,8 +41,12 @@ from llama_stack_api.openai_responses import (
OpenAIResponseOutputMessageMCPCall, OpenAIResponseOutputMessageMCPCall,
OpenAIResponseOutputMessageMCPListTools, OpenAIResponseOutputMessageMCPListTools,
OpenAIResponseText, OpenAIResponseText,
OpenAISystemMessageParam,
OpenAIToolMessageParam,
OpenAIUserMessageParam,
ResponseGuardrailSpec,
Safety,
) )
from llama_stack_api.safety import Safety
async def convert_chat_choice_to_response_message( async def convert_chat_choice_to_response_message(

View file

@ -6,8 +6,7 @@
import asyncio import asyncio
from llama_stack_api.inference import OpenAIMessageParam from llama_stack_api import OpenAIMessageParam, Safety, SafetyViolation, ViolationLevel
from llama_stack_api.safety import Safety, SafetyViolation, ViolationLevel
from llama_stack.core.telemetry import tracing from llama_stack.core.telemetry import tracing
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -6,9 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.files import Files from llama_stack_api import Files, Inference, Models
from llama_stack_api.inference import Inference
from llama_stack_api.models import Models
from llama_stack.core.datatypes import AccessRule, Api from llama_stack.core.datatypes import AccessRule, Api
from llama_stack.providers.utils.kvstore import kvstore_impl from llama_stack.providers.utils.kvstore import kvstore_impl

View file

@ -13,22 +13,26 @@ import uuid
from io import BytesIO from io import BytesIO
from typing import Any, Literal from typing import Any, Literal
from llama_stack_api.batches import Batches, BatchObject, ListBatchesResponse from llama_stack_api import (
from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError Batches,
from llama_stack_api.files import Files, OpenAIFilePurpose BatchObject,
from llama_stack_api.inference import ( ConflictError,
Files,
Inference, Inference,
ListBatchesResponse,
Models,
OpenAIAssistantMessageParam, OpenAIAssistantMessageParam,
OpenAIChatCompletionRequestWithExtraBody, OpenAIChatCompletionRequestWithExtraBody,
OpenAICompletionRequestWithExtraBody, OpenAICompletionRequestWithExtraBody,
OpenAIDeveloperMessageParam, OpenAIDeveloperMessageParam,
OpenAIEmbeddingsRequestWithExtraBody, OpenAIEmbeddingsRequestWithExtraBody,
OpenAIFilePurpose,
OpenAIMessageParam, OpenAIMessageParam,
OpenAISystemMessageParam, OpenAISystemMessageParam,
OpenAIToolMessageParam, OpenAIToolMessageParam,
OpenAIUserMessageParam, OpenAIUserMessageParam,
ResourceNotFoundError,
) )
from llama_stack_api.models import Models
from openai.types.batch import BatchError, Errors from openai.types.batch import BatchError, Errors
from pydantic import BaseModel from pydantic import BaseModel

View file

@ -5,10 +5,7 @@
# the root directory of this source tree. # the root directory of this source tree.
from typing import Any from typing import Any
from llama_stack_api.common.responses import PaginatedResponse from llama_stack_api import Dataset, DatasetIO, DatasetsProtocolPrivate, PaginatedResponse
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Dataset
from llama_stack_api.datatypes import DatasetsProtocolPrivate
from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_uri
from llama_stack.providers.utils.kvstore import kvstore_impl from llama_stack.providers.utils.kvstore import kvstore_impl

View file

@ -6,21 +6,24 @@
import json import json
from typing import Any from typing import Any
from llama_stack_api.agents import Agents from llama_stack_api import (
from llama_stack_api.benchmarks import Benchmark Agents,
from llama_stack_api.common.job_types import Job, JobStatus Benchmark,
from llama_stack_api.datasetio import DatasetIO BenchmarkConfig,
from llama_stack_api.datasets import Datasets BenchmarksProtocolPrivate,
from llama_stack_api.datatypes import BenchmarksProtocolPrivate DatasetIO,
from llama_stack_api.eval import BenchmarkConfig, Eval, EvaluateResponse Datasets,
from llama_stack_api.inference import ( Eval,
EvaluateResponse,
Inference, Inference,
Job,
JobStatus,
OpenAIChatCompletionRequestWithExtraBody, OpenAIChatCompletionRequestWithExtraBody,
OpenAICompletionRequestWithExtraBody, OpenAICompletionRequestWithExtraBody,
OpenAISystemMessageParam, OpenAISystemMessageParam,
OpenAIUserMessageParam, OpenAIUserMessageParam,
Scoring,
) )
from llama_stack_api.scoring import Scoring
from tqdm import tqdm from tqdm import tqdm
from llama_stack.providers.utils.common.data_schema_validator import ColumnName from llama_stack.providers.utils.common.data_schema_validator import ColumnName

View file

@ -10,15 +10,15 @@ from pathlib import Path
from typing import Annotated from typing import Annotated
from fastapi import Depends, File, Form, Response, UploadFile from fastapi import Depends, File, Form, Response, UploadFile
from llama_stack_api.common.errors import ResourceNotFoundError from llama_stack_api import (
from llama_stack_api.common.responses import Order
from llama_stack_api.files import (
ExpiresAfter, ExpiresAfter,
Files, Files,
ListOpenAIFileResponse, ListOpenAIFileResponse,
OpenAIFileDeleteResponse, OpenAIFileDeleteResponse,
OpenAIFileObject, OpenAIFileObject,
OpenAIFilePurpose, OpenAIFilePurpose,
Order,
ResourceNotFoundError,
) )
from llama_stack.core.datatypes import AccessRule from llama_stack.core.datatypes import AccessRule

View file

@ -6,7 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.inference import QuantizationConfig from llama_stack_api import QuantizationConfig
from pydantic import BaseModel, field_validator from pydantic import BaseModel, field_validator
from llama_stack.providers.utils.inference import supported_inference_models from llama_stack.providers.utils.inference import supported_inference_models

View file

@ -8,7 +8,7 @@ import math
from typing import Optional from typing import Optional
import torch import torch
from llama_stack_api.inference import ( from llama_stack_api import (
GreedySamplingStrategy, GreedySamplingStrategy,
JsonSchemaResponseFormat, JsonSchemaResponseFormat,
OpenAIChatCompletionRequestWithExtraBody, OpenAIChatCompletionRequestWithExtraBody,

View file

@ -9,9 +9,11 @@ import time
import uuid import uuid
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
from llama_stack_api.datatypes import ModelsProtocolPrivate from llama_stack_api import (
from llama_stack_api.inference import (
InferenceProvider, InferenceProvider,
Model,
ModelsProtocolPrivate,
ModelType,
OpenAIAssistantMessageParam, OpenAIAssistantMessageParam,
OpenAIChatCompletion, OpenAIChatCompletion,
OpenAIChatCompletionChunk, OpenAIChatCompletionChunk,
@ -23,7 +25,6 @@ from llama_stack_api.inference import (
OpenAIUserMessageParam, OpenAIUserMessageParam,
ToolChoice, ToolChoice,
) )
from llama_stack_api.models import Model, ModelType
from llama_stack.log import get_logger from llama_stack.log import get_logger
from llama_stack.models.llama.datatypes import RawMessage, RawTextItem, ToolDefinition from llama_stack.models.llama.datatypes import RawMessage, RawTextItem, ToolDefinition
@ -375,7 +376,7 @@ class MetaReferenceInferenceImpl(
# Convert tool calls to OpenAI format # Convert tool calls to OpenAI format
openai_tool_calls = None openai_tool_calls = None
if decoded_message.tool_calls: if decoded_message.tool_calls:
from llama_stack_api.inference import ( from llama_stack_api import (
OpenAIChatCompletionToolCall, OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction, OpenAIChatCompletionToolCallFunction,
) )
@ -440,7 +441,7 @@ class MetaReferenceInferenceImpl(
params: OpenAIChatCompletionRequestWithExtraBody, params: OpenAIChatCompletionRequestWithExtraBody,
) -> AsyncIterator[OpenAIChatCompletionChunk]: ) -> AsyncIterator[OpenAIChatCompletionChunk]:
"""Stream chat completion chunks as they're generated.""" """Stream chat completion chunks as they're generated."""
from llama_stack_api.inference import ( from llama_stack_api import (
OpenAIChatCompletionChunk, OpenAIChatCompletionChunk,
OpenAIChatCompletionToolCall, OpenAIChatCompletionToolCall,
OpenAIChatCompletionToolCallFunction, OpenAIChatCompletionToolCallFunction,

View file

@ -6,16 +6,17 @@
from collections.abc import AsyncIterator from collections.abc import AsyncIterator
from llama_stack_api.datatypes import ModelsProtocolPrivate from llama_stack_api import (
from llama_stack_api.inference import (
InferenceProvider, InferenceProvider,
Model,
ModelsProtocolPrivate,
ModelType,
OpenAIChatCompletion, OpenAIChatCompletion,
OpenAIChatCompletionChunk, OpenAIChatCompletionChunk,
OpenAIChatCompletionRequestWithExtraBody, OpenAIChatCompletionRequestWithExtraBody,
OpenAICompletion, OpenAICompletion,
OpenAICompletionRequestWithExtraBody, OpenAICompletionRequestWithExtraBody,
) )
from llama_stack_api.models import Model, ModelType
from llama_stack.log import get_logger from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.embedding_mixin import ( from llama_stack.providers.utils.inference.embedding_mixin import (

View file

@ -12,11 +12,7 @@
from typing import Any from typing import Any
from llama_stack_api.common.type_system import ( from llama_stack_api import ChatCompletionInputType, DialogType, StringType
ChatCompletionInputType,
DialogType,
StringType,
)
from llama_stack.providers.utils.common.data_schema_validator import ( from llama_stack.providers.utils.common.data_schema_validator import (
ColumnName, ColumnName,

View file

@ -6,11 +6,11 @@
from enum import Enum from enum import Enum
from typing import Any from typing import Any
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
AlgorithmConfig, AlgorithmConfig,
Checkpoint, Checkpoint,
DatasetIO,
Datasets,
DPOAlignmentConfig, DPOAlignmentConfig,
JobStatus, JobStatus,
ListPostTrainingJobsResponse, ListPostTrainingJobsResponse,

View file

@ -12,11 +12,11 @@ from typing import Any
import torch import torch
from datasets import Dataset from datasets import Dataset
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
Checkpoint, Checkpoint,
DataConfig, DataConfig,
DatasetIO,
Datasets,
LoraFinetuningConfig, LoraFinetuningConfig,
TrainingConfig, TrainingConfig,
) )

View file

@ -11,10 +11,10 @@ from typing import Any
import torch import torch
from datasets import Dataset from datasets import Dataset
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
Checkpoint, Checkpoint,
DatasetIO,
Datasets,
DPOAlignmentConfig, DPOAlignmentConfig,
TrainingConfig, TrainingConfig,
) )

View file

@ -14,8 +14,7 @@ from typing import TYPE_CHECKING, Any, Protocol
import psutil import psutil
import torch import torch
from datasets import Dataset from datasets import Dataset
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import Checkpoint, DatasetIO, TrainingConfig
from llama_stack_api.post_training import Checkpoint, TrainingConfig
from transformers import AutoConfig, AutoModelForCausalLM from transformers import AutoConfig, AutoModelForCausalLM
if TYPE_CHECKING: if TYPE_CHECKING:

View file

@ -13,7 +13,7 @@
from collections.abc import Callable from collections.abc import Callable
import torch import torch
from llama_stack_api.post_training import DatasetFormat from llama_stack_api import DatasetFormat
from pydantic import BaseModel from pydantic import BaseModel
from torchtune.data._messages import InputOutputToMessages, ShareGPTToMessages from torchtune.data._messages import InputOutputToMessages, ShareGPTToMessages
from torchtune.models.llama3 import llama3_tokenizer from torchtune.models.llama3 import llama3_tokenizer

View file

@ -6,11 +6,11 @@
from enum import Enum from enum import Enum
from typing import Any from typing import Any
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
AlgorithmConfig, AlgorithmConfig,
Checkpoint, Checkpoint,
DatasetIO,
Datasets,
DPOAlignmentConfig, DPOAlignmentConfig,
JobStatus, JobStatus,
ListPostTrainingJobsResponse, ListPostTrainingJobsResponse,

View file

@ -12,14 +12,14 @@ from pathlib import Path
from typing import Any from typing import Any
import torch import torch
from llama_stack_api.common.training_types import PostTrainingMetric from llama_stack_api import (
from llama_stack_api.datasetio import DatasetIO
from llama_stack_api.datasets import Datasets
from llama_stack_api.post_training import (
Checkpoint, Checkpoint,
DataConfig, DataConfig,
DatasetIO,
Datasets,
LoraFinetuningConfig, LoraFinetuningConfig,
OptimizerConfig, OptimizerConfig,
PostTrainingMetric,
QATFinetuningConfig, QATFinetuningConfig,
TrainingConfig, TrainingConfig,
) )

View file

@ -10,16 +10,16 @@ from typing import TYPE_CHECKING, Any
if TYPE_CHECKING: if TYPE_CHECKING:
from codeshield.cs import CodeShieldScanResult from codeshield.cs import CodeShieldScanResult
from llama_stack_api.inference import OpenAIMessageParam from llama_stack_api import (
from llama_stack_api.safety import (
ModerationObject, ModerationObject,
ModerationObjectResults, ModerationObjectResults,
OpenAIMessageParam,
RunShieldResponse, RunShieldResponse,
Safety, Safety,
SafetyViolation, SafetyViolation,
Shield,
ViolationLevel, ViolationLevel,
) )
from llama_stack_api.shields import Shield
from llama_stack.log import get_logger from llama_stack.log import get_logger
from llama_stack.providers.utils.inference.prompt_adapter import ( from llama_stack.providers.utils.inference.prompt_adapter import (

View file

@ -9,23 +9,22 @@ import uuid
from string import Template from string import Template
from typing import Any from typing import Any
from llama_stack_api.common.content_types import ImageContentItem, TextContentItem from llama_stack_api import (
from llama_stack_api.datatypes import ShieldsProtocolPrivate ImageContentItem,
from llama_stack_api.inference import (
Inference, Inference,
ModerationObject,
ModerationObjectResults,
OpenAIChatCompletionRequestWithExtraBody, OpenAIChatCompletionRequestWithExtraBody,
OpenAIMessageParam, OpenAIMessageParam,
OpenAIUserMessageParam, OpenAIUserMessageParam,
)
from llama_stack_api.safety import (
ModerationObject,
ModerationObjectResults,
RunShieldResponse, RunShieldResponse,
Safety, Safety,
SafetyViolation, SafetyViolation,
Shield,
ShieldsProtocolPrivate,
TextContentItem,
ViolationLevel, ViolationLevel,
) )
from llama_stack_api.shields import Shield
from llama_stack.core.datatypes import Api from llama_stack.core.datatypes import Api
from llama_stack.log import get_logger from llama_stack.log import get_logger

View file

@ -7,17 +7,17 @@
from typing import Any from typing import Any
import torch import torch
from llama_stack_api.datatypes import ShieldsProtocolPrivate from llama_stack_api import (
from llama_stack_api.inference import OpenAIMessageParam
from llama_stack_api.safety import (
ModerationObject, ModerationObject,
OpenAIMessageParam,
RunShieldResponse, RunShieldResponse,
Safety, Safety,
SafetyViolation, SafetyViolation,
Shield,
ShieldsProtocolPrivate,
ShieldStore, ShieldStore,
ViolationLevel, ViolationLevel,
) )
from llama_stack_api.shields import Shield
from transformers import AutoModelForSequenceClassification, AutoTokenizer from transformers import AutoModelForSequenceClassification, AutoTokenizer
from llama_stack.core.utils.model_utils import model_local_dir from llama_stack.core.utils.model_utils import model_local_dir

View file

@ -5,16 +5,17 @@
# the root directory of this source tree. # the root directory of this source tree.
from typing import Any from typing import Any
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets DatasetIO,
from llama_stack_api.datatypes import ScoringFunctionsProtocolPrivate Datasets,
from llama_stack_api.scoring import (
ScoreBatchResponse, ScoreBatchResponse,
ScoreResponse, ScoreResponse,
Scoring, Scoring,
ScoringFn,
ScoringFnParams,
ScoringFunctionsProtocolPrivate,
ScoringResult, ScoringResult,
) )
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
from llama_stack.core.datatypes import Api from llama_stack.core.datatypes import Api
from llama_stack.providers.utils.common.data_schema_validator import ( from llama_stack.providers.utils.common.data_schema_validator import (

View file

@ -8,8 +8,7 @@ import json
import re import re
from typing import Any from typing import Any
from llama_stack_api.scoring import ScoringResultRow from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -6,8 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.scoring import ScoringResultRow from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,9 +4,9 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
NumberType,
RegexParserScoringFnParams, RegexParserScoringFnParams,
ScoringFn, ScoringFn,
) )

View file

@ -4,9 +4,9 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
NumberType,
RegexParserScoringFnParams, RegexParserScoringFnParams,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -6,8 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.scoring import ScoringResultRow from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -5,8 +5,7 @@
# the root directory of this source tree. # the root directory of this source tree.
from typing import Any from typing import Any
from llama_stack_api.scoring import ScoringResultRow from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams, ScoringFnParamsType
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -6,8 +6,7 @@
import re import re
from typing import Any from typing import Any
from llama_stack_api.scoring import ScoringResultRow from llama_stack_api import ScoringFnParams, ScoringFnParamsType, ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams, ScoringFnParamsType
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -6,8 +6,7 @@
from typing import Any from typing import Any
from llama_stack_api.scoring import ScoringResultRow from llama_stack_api import ScoringFnParams, ScoringResultRow
from llama_stack_api.scoring_functions import ScoringFnParams
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn

View file

@ -17,17 +17,18 @@ from autoevals.ragas import (
ContextRelevancy, ContextRelevancy,
Faithfulness, Faithfulness,
) )
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets DatasetIO,
from llama_stack_api.datatypes import ScoringFunctionsProtocolPrivate Datasets,
from llama_stack_api.scoring import (
ScoreBatchResponse, ScoreBatchResponse,
ScoreResponse, ScoreResponse,
Scoring, Scoring,
ScoringFn,
ScoringFnParams,
ScoringFunctionsProtocolPrivate,
ScoringResult, ScoringResult,
ScoringResultRow, ScoringResultRow,
) )
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
from pydantic import BaseModel from pydantic import BaseModel
from llama_stack.core.datatypes import Api from llama_stack.core.datatypes import Api

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -4,10 +4,10 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack_api.common.type_system import NumberType from llama_stack_api import (
from llama_stack_api.scoring_functions import (
AggregationFunctionType, AggregationFunctionType,
BasicScoringFnParams, BasicScoringFnParams,
NumberType,
ScoringFn, ScoringFn,
) )

View file

@ -5,17 +5,18 @@
# the root directory of this source tree. # the root directory of this source tree.
from typing import Any from typing import Any
from llama_stack_api.datasetio import DatasetIO from llama_stack_api import (
from llama_stack_api.datasets import Datasets DatasetIO,
from llama_stack_api.datatypes import ScoringFunctionsProtocolPrivate Datasets,
from llama_stack_api.inference import Inference Inference,
from llama_stack_api.scoring import (
ScoreBatchResponse, ScoreBatchResponse,
ScoreResponse, ScoreResponse,
Scoring, Scoring,
ScoringFn,
ScoringFnParams,
ScoringFunctionsProtocolPrivate,
ScoringResult, ScoringResult,
) )
from llama_stack_api.scoring_functions import ScoringFn, ScoringFnParams
from llama_stack.core.datatypes import Api from llama_stack.core.datatypes import Api
from llama_stack.providers.utils.common.data_schema_validator import ( from llama_stack.providers.utils.common.data_schema_validator import (

Some files were not shown because too many files have changed in this diff Show more