mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
fix: move models.py to top-level init
All batch models are now exported from the top level for better discoverability and IDE support. Signed-off-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
parent
6f552e0a31
commit
ac816a6b25
1 changed file with 13 additions and 1 deletion
|
|
@@ -26,7 +26,15 @@ from . import common # noqa: F401
|
|||
|
||||
# Import all public API symbols
|
||||
from .agents import Agents, ResponseGuardrail, ResponseGuardrailSpec
|
||||
from .batches import Batches, BatchObject, ListBatchesResponse
|
||||
from .batches import (
|
||||
Batches,
|
||||
BatchObject,
|
||||
CancelBatchRequest,
|
||||
CreateBatchRequest,
|
||||
ListBatchesRequest,
|
||||
ListBatchesResponse,
|
||||
RetrieveBatchRequest,
|
||||
)
|
||||
from .benchmarks import (
|
||||
Benchmark,
|
||||
BenchmarkInput,
|
||||
|
|
@@ -462,6 +470,9 @@ __all__ = [
|
|||
"BasicScoringFnParams",
|
||||
"Batches",
|
||||
"BatchObject",
|
||||
"CancelBatchRequest",
|
||||
"CreateBatchRequest",
|
||||
"ListBatchesRequest",
|
||||
"Benchmark",
|
||||
"BenchmarkConfig",
|
||||
"BenchmarkInput",
|
||||
|
|
@@ -555,6 +566,7 @@ __all__ = [
|
|||
"LLMAsJudgeScoringFnParams",
|
||||
"LLMRAGQueryGeneratorConfig",
|
||||
"ListBatchesResponse",
|
||||
"RetrieveBatchRequest",
|
||||
"ListBenchmarksResponse",
|
||||
"ListDatasetsResponse",
|
||||
"ListModelsResponse",
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue