feat: refactor llama-stack-api structure

move llama_stack_api.apis.* to top-level llama_stack_api.

merge provider datatypes and the existing apis.datatypes into a common llama_stack_api.datatypes

update all usages of these packages throughout the llama-stack codebase

Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
Charlie Doern 2025-11-12 15:59:34 -05:00
parent d6b915ce0a
commit b7480e9c88
296 changed files with 906 additions and 1109 deletions

View file

@@ -8,8 +8,8 @@ import os
from unittest.mock import patch
import pytest
from llama_stack_api.apis.datasets import Dataset, DatasetPurpose, URIDataSource
from llama_stack_api.apis.resource import ResourceType
from llama_stack_api.datasets import Dataset, DatasetPurpose, URIDataSource
from llama_stack_api.resource import ResourceType
from llama_stack.providers.remote.datasetio.nvidia.config import NvidiaDatasetIOConfig
from llama_stack.providers.remote.datasetio.nvidia.datasetio import NvidiaDatasetIOAdapter

View file

@@ -8,11 +8,11 @@ import os
from unittest.mock import MagicMock, patch
import pytest
from llama_stack_api.apis.benchmarks import Benchmark
from llama_stack_api.apis.common.job_types import Job, JobStatus
from llama_stack_api.apis.eval.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
from llama_stack_api.apis.inference.inference import TopPSamplingStrategy
from llama_stack_api.apis.resource import ResourceType
from llama_stack_api.benchmarks import Benchmark
from llama_stack_api.common.job_types import Job, JobStatus
from llama_stack_api.eval import BenchmarkConfig, EvaluateResponse, ModelCandidate, SamplingParams
from llama_stack_api.inference import TopPSamplingStrategy
from llama_stack_api.resource import ResourceType
from llama_stack.models.llama.sku_types import CoreModelId
from llama_stack.providers.remote.eval.nvidia.config import NVIDIAEvalConfig

View file

@@ -9,7 +9,7 @@ import warnings
from unittest.mock import patch
import pytest
from llama_stack_api.apis.post_training.post_training import (
from llama_stack_api.post_training import (
DataConfig,
DatasetFormat,
EfficiencyConfig,

View file

@@ -8,7 +8,7 @@ from unittest.mock import AsyncMock, MagicMock, patch
import aiohttp
import pytest
from llama_stack_api.apis.models import ModelType
from llama_stack_api.models import ModelType
from llama_stack.providers.remote.inference.nvidia.config import NVIDIAConfig
from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAInferenceAdapter

View file

@@ -9,13 +9,13 @@ from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from llama_stack_api.apis.inference import (
from llama_stack_api.inference import (
OpenAIAssistantMessageParam,
OpenAIUserMessageParam,
)
from llama_stack_api.apis.resource import ResourceType
from llama_stack_api.apis.safety import RunShieldResponse, ViolationLevel
from llama_stack_api.apis.shields import Shield
from llama_stack_api.resource import ResourceType
from llama_stack_api.safety import RunShieldResponse, ViolationLevel
from llama_stack_api.shields import Shield
from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter

View file

@@ -9,7 +9,7 @@ import warnings
from unittest.mock import patch
import pytest
from llama_stack_api.apis.post_training.post_training import (
from llama_stack_api.post_training import (
DataConfig,
DatasetFormat,
LoraFinetuningConfig,