refactor: enforce top-level imports for llama-stack-api

Enforce that all imports from llama-stack-api use the form:

from llama_stack_api import <symbol>

This prevents external code from accessing internal package structure
(e.g., llama_stack_api.agents, llama_stack_api.common.*) and establishes
a clear public API boundary.
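
For example, where a test previously imported a symbol from a submodule,
it now imports it from the package root (symbol taken from the diffs below):

    # Before: reaches into internal package structure (now disallowed)
    from llama_stack_api.inference import OpenAIUserMessageParam

    # After: all public symbols come from the package root
    from llama_stack_api import OpenAIUserMessageParam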

Changes:
- Export 400+ symbols from llama_stack_api/__init__.py
- Include all API types, common utilities, and strong_typing helpers
- Update files across src/llama_stack, docs/, tests/, and scripts/
- Convert all submodule imports to top-level imports
- Ensure docs use the top-level import pattern

Addresses PR review feedback requiring an explicit __all__ definition to
prevent "peeking inside" the API package.

Signed-off-by: Charlie Doern <cdoern@redhat.com>
Charlie Doern 2025-11-13 14:03:30 -05:00
parent b7480e9c88
commit 2e5d1c8881
270 changed files with 1587 additions and 750 deletions


@@ -8,8 +8,7 @@ import os
 from unittest.mock import patch
 import pytest
-from llama_stack_api.datasets import Dataset, DatasetPurpose, URIDataSource
-from llama_stack_api.resource import ResourceType
+from llama_stack_api import Dataset, DatasetPurpose, ResourceType, URIDataSource
 from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig
 from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter


@@ -8,11 +8,17 @@ import os
 from unittest.mock import MagicMock, patch
 import pytest
-from llama_stack_api.benchmarks import Benchmark
-from llama_stack_api.common.job_types import Job, JobStatus
-from llama_stack_api.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
-from llama_stack_api.inference import TopPSamplingStrategy
-from llama_stack_api.resource import ResourceType
+from llama_stack_api import (
+    Benchmark,
+    BenchmarkConfig,
+    EvaluateResponse,
+    Job,
+    JobStatus,
+    ModelCandidate,
+    ResourceType,
+    SamplingParams,
+    TopPSamplingStrategy,
+)
 from llama_stack.models.llama.sku_types import CoreModelId
 from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig


@@ -9,7 +9,7 @@ import warnings
 from unittest.mock import patch
 import pytest
-from llama_stack_api.post_training import (
+from llama_stack_api import (
     DataConfig,
     DatasetFormat,
     EfficiencyConfig,


@@ -8,7 +8,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
 import aiohttp
 import pytest
-from llama_stack_api.models import ModelType
+from llama_stack_api import ModelType
 from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter


@@ -9,13 +9,14 @@ from typing import Any
 from unittest.mock import AsyncMock, MagicMock, patch
 import pytest
-from llama_stack_api.inference import (
+from llama_stack_api import (
     OpenAIAssistantMessageParam,
     OpenAIUserMessageParam,
+    ResourceType,
+    RunShieldResponse,
+    Shield,
+    ViolationLevel,
 )
-from llama_stack_api.resource import ResourceType
-from llama_stack_api.safety import RunShieldResponse, ViolationLevel
-from llama_stack_api.shields import Shield
 from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
 from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter


@@ -9,7 +9,7 @@ import warnings
 from unittest.mock import patch
 import pytest
-from llama_stack_api.post_training import (
+from llama_stack_api import (
     DataConfig,
     DatasetFormat,
     LoraFinetuningConfig,