Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-05 10:23:44 +00:00
feat: refactor llama-stack-api structure
Move llama_stack_api.apis... to the top-level llama_stack_api package, merge the provider datatypes and the existing apis.datatypes into a common llama_stack_api.datatypes, and update all usages of these packages throughout LLS.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
parent d6b915ce0a
commit b7480e9c88
296 changed files with 906 additions and 1109 deletions
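For illustration only, a minimal before/after sketch of what this refactor means for imports, using symbols that appear in the hunks below (not an exhaustive mapping):

# Before this commit: API packages lived under llama_stack_api.apis.*, and
# provider datatypes lived under llama_stack_api.providers.datatypes.
# from llama_stack_api.apis.inference import Inference
# from llama_stack_api.apis.datatypes import ExternalApiSpec
# from llama_stack_api.providers.datatypes import Api, ProviderSpec

# After this commit: each API module is top level, and both datatypes modules
# are merged into a single llama_stack_api.datatypes.
from llama_stack_api.inference import Inference
from llama_stack_api.datatypes import Api, ExternalApiSpec, ProviderSpec

The symbol names themselves are unchanged in the hunks below; only the module paths move.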
@@ -6,7 +6,7 @@
 import sys
 
-from llama_stack_api.providers.datatypes import Api
+from llama_stack_api.datatypes import Api
 from pydantic import BaseModel
 from termcolor import cprint
@@ -12,7 +12,7 @@ from enum import Enum
 from typing import Any, Union, get_args, get_origin
 
 import httpx
-from llama_stack_api.providers.datatypes import RemoteProviderConfig
+from llama_stack_api.datatypes import RemoteProviderConfig
 from pydantic import BaseModel, parse_obj_as
 from termcolor import cprint
@@ -6,7 +6,7 @@
 import textwrap
 from typing import Any
 
-from llama_stack_api.providers.datatypes import Api, ProviderSpec
+from llama_stack_api.datatypes import Api, ProviderSpec
 
 from llama_stack.core.datatypes import (
     LLAMA_STACK_RUN_CONFIG_VERSION,
@@ -8,7 +8,7 @@ import secrets
 import time
 from typing import Any, Literal
 
-from llama_stack_api.apis.conversations.conversations import (
+from llama_stack_api.conversations import (
     Conversation,
     ConversationDeletedResource,
     ConversationItem,
@@ -9,21 +9,21 @@ from pathlib import Path
 from typing import Annotated, Any, Literal, Self
 from urllib.parse import urlparse
 
-from llama_stack_api.apis.benchmarks import Benchmark, BenchmarkInput
-from llama_stack_api.apis.datasetio import DatasetIO
-from llama_stack_api.apis.datasets import Dataset, DatasetInput
-from llama_stack_api.apis.eval import Eval
-from llama_stack_api.apis.inference import Inference
-from llama_stack_api.apis.models import Model, ModelInput
-from llama_stack_api.apis.resource import Resource
-from llama_stack_api.apis.safety import Safety
-from llama_stack_api.apis.scoring import Scoring
-from llama_stack_api.apis.scoring_functions import ScoringFn, ScoringFnInput
-from llama_stack_api.apis.shields import Shield, ShieldInput
-from llama_stack_api.apis.tools import ToolGroup, ToolGroupInput, ToolRuntime
-from llama_stack_api.apis.vector_io import VectorIO
-from llama_stack_api.apis.vector_stores import VectorStore, VectorStoreInput
-from llama_stack_api.providers.datatypes import Api, ProviderSpec
+from llama_stack_api.benchmarks import Benchmark, BenchmarkInput
+from llama_stack_api.datasetio import DatasetIO
+from llama_stack_api.datasets import Dataset, DatasetInput
+from llama_stack_api.datatypes import Api, ProviderSpec
+from llama_stack_api.eval import Eval
+from llama_stack_api.inference import Inference
+from llama_stack_api.models import Model, ModelInput
+from llama_stack_api.resource import Resource
+from llama_stack_api.safety import Safety
+from llama_stack_api.scoring import Scoring
+from llama_stack_api.scoring_functions import ScoringFn, ScoringFnInput
+from llama_stack_api.shields import Shield, ShieldInput
+from llama_stack_api.tools import ToolGroup, ToolGroupInput, ToolRuntime
+from llama_stack_api.vector_io import VectorIO
+from llama_stack_api.vector_stores import VectorStore, VectorStoreInput
 from pydantic import BaseModel, Field, field_validator, model_validator
 
 from llama_stack.core.access_control.datatypes import AccessRule
@@ -10,7 +10,7 @@ import os
 from typing import Any
 
 import yaml
-from llama_stack_api.providers.datatypes import (
+from llama_stack_api.datatypes import (
     Api,
     InlineProviderSpec,
     ProviderSpec,
@@ -6,7 +6,7 @@
 
 import yaml
-from llama_stack_api.apis.datatypes import Api, ExternalApiSpec
+from llama_stack_api.datatypes import Api, ExternalApiSpec
 
 from llama_stack.core.datatypes import BuildConfig, StackRunConfig
 from llama_stack.log import get_logger
@@ -6,14 +6,14 @@
 
 from importlib.metadata import version
 
-from llama_stack_api.apis.inspect import (
+from llama_stack_api.datatypes import HealthStatus
+from llama_stack_api.inspect import (
     HealthInfo,
     Inspect,
     ListRoutesResponse,
     RouteInfo,
     VersionInfo,
 )
-from llama_stack_api.providers.datatypes import HealthStatus
 from pydantic import BaseModel
 
 from llama_stack.core.datatypes import StackRunConfig
@@ -7,7 +7,7 @@
 import json
 from typing import Any
 
-from llama_stack_api.apis.prompts import ListPromptsResponse, Prompt, Prompts
+from llama_stack_api.prompts import ListPromptsResponse, Prompt, Prompts
 from pydantic import BaseModel
 
 from llama_stack.core.datatypes import StackRunConfig
@@ -7,8 +7,8 @@
 import asyncio
 from typing import Any
 
-from llama_stack_api.apis.providers import ListProvidersResponse, ProviderInfo, Providers
-from llama_stack_api.providers.datatypes import HealthResponse, HealthStatus
+from llama_stack_api.datatypes import HealthResponse, HealthStatus
+from llama_stack_api.providers import ListProvidersResponse, ProviderInfo, Providers
 from pydantic import BaseModel
 
 from llama_stack.log import get_logger
@@ -8,33 +8,17 @@ import importlib.metadata
 import inspect
 from typing import Any
 
-from llama_stack_api.apis.agents import Agents
-from llama_stack_api.apis.batches import Batches
-from llama_stack_api.apis.benchmarks import Benchmarks
-from llama_stack_api.apis.conversations import Conversations
-from llama_stack_api.apis.datasetio import DatasetIO
-from llama_stack_api.apis.datasets import Datasets
-from llama_stack_api.apis.datatypes import ExternalApiSpec
-from llama_stack_api.apis.eval import Eval
-from llama_stack_api.apis.files import Files
-from llama_stack_api.apis.inference import Inference, InferenceProvider
-from llama_stack_api.apis.inspect import Inspect
-from llama_stack_api.apis.models import Models
-from llama_stack_api.apis.post_training import PostTraining
-from llama_stack_api.apis.prompts import Prompts
-from llama_stack_api.apis.providers import Providers as ProvidersAPI
-from llama_stack_api.apis.safety import Safety
-from llama_stack_api.apis.scoring import Scoring
-from llama_stack_api.apis.scoring_functions import ScoringFunctions
-from llama_stack_api.apis.shields import Shields
-from llama_stack_api.apis.tools import ToolGroups, ToolRuntime
-from llama_stack_api.apis.vector_io import VectorIO
-from llama_stack_api.apis.vector_stores import VectorStore
-from llama_stack_api.apis.version import LLAMA_STACK_API_V1ALPHA
-from llama_stack_api.providers.datatypes import (
+from llama_stack_api.agents import Agents
+from llama_stack_api.batches import Batches
+from llama_stack_api.benchmarks import Benchmarks
+from llama_stack_api.conversations import Conversations
+from llama_stack_api.datasetio import DatasetIO
+from llama_stack_api.datasets import Datasets
+from llama_stack_api.datatypes import (
     Api,
     BenchmarksProtocolPrivate,
     DatasetsProtocolPrivate,
+    ExternalApiSpec,
     ModelsProtocolPrivate,
     ProviderSpec,
     RemoteProviderConfig,
@@ -43,6 +27,22 @@ from llama_stack_api.providers.datatypes import (
     ShieldsProtocolPrivate,
     ToolGroupsProtocolPrivate,
 )
+from llama_stack_api.eval import Eval
+from llama_stack_api.files import Files
+from llama_stack_api.inference import Inference, InferenceProvider
+from llama_stack_api.inspect import Inspect
+from llama_stack_api.models import Models
+from llama_stack_api.post_training import PostTraining
+from llama_stack_api.prompts import Prompts
+from llama_stack_api.providers import Providers as ProvidersAPI
+from llama_stack_api.safety import Safety
+from llama_stack_api.scoring import Scoring
+from llama_stack_api.scoring_functions import ScoringFunctions
+from llama_stack_api.shields import Shields
+from llama_stack_api.tools import ToolGroups, ToolRuntime
+from llama_stack_api.vector_io import VectorIO
+from llama_stack_api.vector_stores import VectorStore
+from llama_stack_api.version import LLAMA_STACK_API_V1ALPHA
 
 from llama_stack.core.client import get_client_impl
 from llama_stack.core.datatypes import (
@@ -6,7 +6,7 @@
 
 from typing import Any
 
-from llama_stack_api.providers.datatypes import Api, RoutingTable
+from llama_stack_api.datatypes import Api, RoutingTable
 
 from llama_stack.core.datatypes import (
     AccessRule,
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api.apis.common.responses import PaginatedResponse
-from llama_stack_api.apis.datasetio import DatasetIO
-from llama_stack_api.apis.datasets import DatasetPurpose, DataSource
-from llama_stack_api.providers.datatypes import RoutingTable
+from llama_stack_api.common.responses import PaginatedResponse
+from llama_stack_api.datasetio import DatasetIO
+from llama_stack_api.datasets import DatasetPurpose, DataSource
+from llama_stack_api.datatypes import RoutingTable
 
 from llama_stack.log import get_logger
@@ -6,14 +6,14 @@
 
 from typing import Any
 
-from llama_stack_api.apis.eval import BenchmarkConfig, Eval, EvaluateResponse, Job
-from llama_stack_api.apis.scoring import (
+from llama_stack_api.datatypes import RoutingTable
+from llama_stack_api.eval import BenchmarkConfig, Eval, EvaluateResponse, Job
+from llama_stack_api.scoring import (
     ScoreBatchResponse,
     ScoreResponse,
     Scoring,
     ScoringFnParams,
 )
-from llama_stack_api.providers.datatypes import RoutingTable
 
 from llama_stack.log import get_logger
@@ -11,13 +11,16 @@ from datetime import UTC, datetime
 from typing import Annotated, Any
 
 from fastapi import Body
-from llama_stack_api.apis.common.errors import ModelNotFoundError, ModelTypeError
-from llama_stack_api.apis.inference import (
+from llama_stack_api.common.errors import ModelNotFoundError, ModelTypeError
+from llama_stack_api.datatypes import HealthResponse, HealthStatus, RoutingTable
+from llama_stack_api.inference import (
     Inference,
     ListOpenAIChatCompletionResponse,
     OpenAIAssistantMessageParam,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
+    OpenAIChatCompletionContentPartImageParam,
+    OpenAIChatCompletionContentPartTextParam,
     OpenAIChatCompletionRequestWithExtraBody,
     OpenAIChatCompletionToolCall,
     OpenAIChatCompletionToolCallFunction,
@@ -32,12 +35,7 @@ from llama_stack_api.apis.inference import (
     Order,
     RerankResponse,
 )
-from llama_stack_api.apis.inference.inference import (
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
-)
-from llama_stack_api.apis.models import ModelType
-from llama_stack_api.providers.datatypes import HealthResponse, HealthStatus, RoutingTable
+from llama_stack_api.models import ModelType
 from openai.types.chat import ChatCompletionToolChoiceOptionParam as OpenAIChatCompletionToolChoiceOptionParam
 from openai.types.chat import ChatCompletionToolParam as OpenAIChatCompletionToolParam
 from pydantic import TypeAdapter
@@ -6,11 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api.apis.inference import OpenAIMessageParam
-from llama_stack_api.apis.safety import RunShieldResponse, Safety
-from llama_stack_api.apis.safety.safety import ModerationObject
-from llama_stack_api.apis.shields import Shield
-from llama_stack_api.providers.datatypes import RoutingTable
+from llama_stack_api.datatypes import RoutingTable
+from llama_stack_api.inference import OpenAIMessageParam
+from llama_stack_api.safety import ModerationObject, RunShieldResponse, Safety
+from llama_stack_api.shields import Shield
 
 from llama_stack.core.datatypes import SafetyConfig
 from llama_stack.log import get_logger
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api.apis.common.content_types import (
+from llama_stack_api.common.content_types import (
     URL,
 )
-from llama_stack_api.apis.tools import (
+from llama_stack_api.tools import (
     ListToolDefsResponse,
     ToolRuntime,
 )
@@ -9,9 +9,10 @@ import uuid
 from typing import Annotated, Any
 
 from fastapi import Body
-from llama_stack_api.apis.common.content_types import InterleavedContent
-from llama_stack_api.apis.models import ModelType
-from llama_stack_api.apis.vector_io import (
+from llama_stack_api.common.content_types import InterleavedContent
+from llama_stack_api.datatypes import HealthResponse, HealthStatus, RoutingTable
+from llama_stack_api.models import ModelType
+from llama_stack_api.vector_io import (
     Chunk,
     OpenAICreateVectorStoreFileBatchRequestWithExtraBody,
     OpenAICreateVectorStoreRequestWithExtraBody,
@@ -32,7 +33,6 @@ from llama_stack_api.apis.vector_io import (
     VectorStoreObject,
     VectorStoreSearchResponsePage,
 )
-from llama_stack_api.providers.datatypes import HealthResponse, HealthStatus, RoutingTable
 
 from llama_stack.core.datatypes import VectorStoresConfig
 from llama_stack.log import get_logger
@@ -6,7 +6,7 @@
 
 from typing import Any
 
-from llama_stack_api.apis.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse
+from llama_stack_api.benchmarks import Benchmark, Benchmarks, ListBenchmarksResponse
 
 from llama_stack.core.datatypes import (
     BenchmarkWithOwner,
@@ -6,10 +6,10 @@
 
 from typing import Any
 
-from llama_stack_api.apis.common.errors import ModelNotFoundError
-from llama_stack_api.apis.models import Model
-from llama_stack_api.apis.resource import ResourceType
-from llama_stack_api.providers.datatypes import Api, RoutingTable
+from llama_stack_api.common.errors import ModelNotFoundError
+from llama_stack_api.datatypes import Api, RoutingTable
+from llama_stack_api.models import Model
+from llama_stack_api.resource import ResourceType
 
 from llama_stack.core.access_control.access_control import AccessDeniedError, is_action_allowed
 from llama_stack.core.access_control.datatypes import Action
@@ -7,8 +7,8 @@
 import uuid
 from typing import Any
 
-from llama_stack_api.apis.common.errors import DatasetNotFoundError
-from llama_stack_api.apis.datasets import (
+from llama_stack_api.common.errors import DatasetNotFoundError
+from llama_stack_api.datasets import (
     Dataset,
     DatasetPurpose,
     Datasets,
@@ -18,7 +18,7 @@ from llama_stack_api.apis.datasets import (
     RowsDataSource,
     URIDataSource,
 )
-from llama_stack_api.apis.resource import ResourceType
+from llama_stack_api.resource import ResourceType
 
 from llama_stack.core.datatypes import (
     DatasetWithOwner,
@@ -7,8 +7,8 @@
 import time
 from typing import Any
 
-from llama_stack_api.apis.common.errors import ModelNotFoundError
-from llama_stack_api.apis.models import (
+from llama_stack_api.common.errors import ModelNotFoundError
+from llama_stack_api.models import (
     ListModelsResponse,
     Model,
     Models,
@@ -4,9 +4,9 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from llama_stack_api.apis.common.type_system import ParamType
-from llama_stack_api.apis.resource import ResourceType
-from llama_stack_api.apis.scoring_functions import (
+from llama_stack_api.common.type_system import ParamType
+from llama_stack_api.resource import ResourceType
+from llama_stack_api.scoring_functions import (
     ListScoringFunctionsResponse,
     ScoringFn,
     ScoringFnParams,
@@ -6,8 +6,8 @@
 
 from typing import Any
 
-from llama_stack_api.apis.resource import ResourceType
-from llama_stack_api.apis.shields import ListShieldsResponse, Shield, Shields
+from llama_stack_api.resource import ResourceType
+from llama_stack_api.shields import ListShieldsResponse, Shield, Shields
 
 from llama_stack.core.datatypes import (
     ShieldWithOwner,
@@ -6,9 +6,9 @@
 
 from typing import Any
 
-from llama_stack_api.apis.common.content_types import URL
-from llama_stack_api.apis.common.errors import ToolGroupNotFoundError
-from llama_stack_api.apis.tools import ListToolDefsResponse, ListToolGroupsResponse, ToolDef, ToolGroup, ToolGroups
+from llama_stack_api.common.content_types import URL
+from llama_stack_api.common.errors import ToolGroupNotFoundError
+from llama_stack_api.tools import ListToolDefsResponse, ListToolGroupsResponse, ToolDef, ToolGroup, ToolGroups
 
 from llama_stack.core.datatypes import AuthenticationRequiredError, ToolGroupWithOwner
 from llama_stack.log import get_logger
@@ -6,12 +6,12 @@
 
 from typing import Any
 
-from llama_stack_api.apis.common.errors import ModelNotFoundError, ModelTypeError
-from llama_stack_api.apis.models import ModelType
-from llama_stack_api.apis.resource import ResourceType
+from llama_stack_api.common.errors import ModelNotFoundError, ModelTypeError
+from llama_stack_api.models import ModelType
+from llama_stack_api.resource import ResourceType
 
 # Removed VectorStores import to avoid exposing public API
-from llama_stack_api.apis.vector_io.vector_io import (
+from llama_stack_api.vector_io import (
     SearchRankingOptions,
     VectorStoreChunkingStrategy,
     VectorStoreDeleteResponse,
@@ -11,7 +11,7 @@ from urllib.parse import parse_qs, urljoin, urlparse
 
 import httpx
 import jwt
-from llama_stack_api.apis.common.errors import TokenValidationError
+from llama_stack_api.common.errors import TokenValidationError
 from pydantic import BaseModel, Field
 
 from llama_stack.core.datatypes import (
@@ -10,7 +10,7 @@ from collections.abc import Callable
 from typing import Any
 
 from aiohttp import hdrs
-from llama_stack_api.apis.datatypes import Api, ExternalApiSpec
+from llama_stack_api.datatypes import Api, ExternalApiSpec
 from llama_stack_api.schema_utils import WebMethod
 from starlette.routing import Route
@@ -28,9 +28,9 @@ from fastapi import Path as FastapiPath
 from fastapi.exceptions import RequestValidationError
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, StreamingResponse
-from llama_stack_api.apis.common.errors import ConflictError, ResourceNotFoundError
-from llama_stack_api.apis.common.responses import PaginatedResponse
-from llama_stack_api.providers.datatypes import Api
+from llama_stack_api.common.errors import ConflictError, ResourceNotFoundError
+from llama_stack_api.common.responses import PaginatedResponse
+from llama_stack_api.datatypes import Api
 from openai import BadRequestError
 from pydantic import BaseModel, ValidationError
@@ -12,27 +12,27 @@ import tempfile
 from typing import Any
 
 import yaml
-from llama_stack_api.apis.agents import Agents
-from llama_stack_api.apis.batches import Batches
-from llama_stack_api.apis.benchmarks import Benchmarks
-from llama_stack_api.apis.conversations import Conversations
-from llama_stack_api.apis.datasetio import DatasetIO
-from llama_stack_api.apis.datasets import Datasets
-from llama_stack_api.apis.eval import Eval
-from llama_stack_api.apis.files import Files
-from llama_stack_api.apis.inference import Inference
-from llama_stack_api.apis.inspect import Inspect
-from llama_stack_api.apis.models import Models
-from llama_stack_api.apis.post_training import PostTraining
-from llama_stack_api.apis.prompts import Prompts
-from llama_stack_api.apis.providers import Providers
-from llama_stack_api.apis.safety import Safety
-from llama_stack_api.apis.scoring import Scoring
-from llama_stack_api.apis.scoring_functions import ScoringFunctions
-from llama_stack_api.apis.shields import Shields
-from llama_stack_api.apis.tools import ToolGroups, ToolRuntime
-from llama_stack_api.apis.vector_io import VectorIO
-from llama_stack_api.providers.datatypes import Api
+from llama_stack_api.agents import Agents
+from llama_stack_api.batches import Batches
+from llama_stack_api.benchmarks import Benchmarks
+from llama_stack_api.conversations import Conversations
+from llama_stack_api.datasetio import DatasetIO
+from llama_stack_api.datasets import Datasets
+from llama_stack_api.datatypes import Api
+from llama_stack_api.eval import Eval
+from llama_stack_api.files import Files
+from llama_stack_api.inference import Inference
+from llama_stack_api.inspect import Inspect
+from llama_stack_api.models import Models
+from llama_stack_api.post_training import PostTraining
+from llama_stack_api.prompts import Prompts
+from llama_stack_api.providers import Providers
+from llama_stack_api.safety import Safety
+from llama_stack_api.scoring import Scoring
+from llama_stack_api.scoring_functions import ScoringFunctions
+from llama_stack_api.shields import Shields
+from llama_stack_api.tools import ToolGroups, ToolRuntime
+from llama_stack_api.vector_io import VectorIO
 
 from llama_stack.core.conversations.conversations import ConversationServiceConfig, ConversationServiceImpl
 from llama_stack.core.datatypes import Provider, SafetyConfig, StackRunConfig, VectorStoresConfig