Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2026-01-02 17:34:31 +00:00

add datastore initial code

commit 0a2af0e2f8 (parent d667a7109f)
9 changed files with 293 additions and 9 deletions
@@ -7,6 +7,7 @@
 from pathlib import Path

 from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput, ToolGroupInput
+from llama_stack.providers.remote.datasets.nvidia import NvidiaDatasetConfig
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES
 from llama_stack.providers.remote.post_training.nvidia import NvidiaPostTrainingConfig
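
The one addition in this hunk is the NvidiaDatasetConfig import, pointing at the new remote datasets provider package whose body is not shown in this view. Below is a minimal sketch of what such a config class might look like, assuming it mirrors sibling provider configs such as NVIDIAConfig: a pydantic model plus a sample_run_config() classmethod that emits env-var placeholders. Every field name, default, and env-var name here is an assumption, not taken from the commit.

```python
# Hypothetical sketch of NvidiaDatasetConfig; field names and defaults
# are assumptions modeled on sibling NVIDIA provider configs.
from typing import Any

from pydantic import BaseModel, Field


class NvidiaDatasetConfig(BaseModel):
    """Connection settings for the remote NVIDIA datasets provider (sketch)."""

    # Base URL of the datasets service; the default is illustrative only.
    datasets_url: str = Field(default="http://nemo.test")
    # API key, normally resolved from the environment at run time.
    api_key: str | None = Field(default=None)

    @classmethod
    def sample_run_config(cls, **kwargs: Any) -> dict[str, Any]:
        # llama-stack sample configs conventionally return
        # "${env.VAR:default}" placeholders that the run.yaml
        # loader expands at startup.
        return {
            "datasets_url": "${env.NVIDIA_DATASETS_URL:http://nemo.test}",
            "api_key": "${env.NVIDIA_API_KEY:}",
        }
```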
@@ -24,6 +25,7 @@ def get_distribution_template() -> DistributionTemplate:
         "telemetry": ["inline::meta-reference"],
         "eval": ["inline::meta-reference"],
         "datasetio": ["inline::localfs"],
+        "datasets": ["remote::nvidia"],
         "scoring": ["inline::basic"],
         "tool_runtime": ["inline::rag-runtime"],
     }
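
The registry being edited here is a plain mapping from API name to the provider types that can back it; "inline::" providers run in-process while "remote::" providers call out to an external service. As a sketch, the mapping reads like this after the change (entries above "telemetry" fall outside the diff context):

```python
# Provider registry shape after this hunk; keys above "telemetry" are
# not visible in the diff. "inline::" = in-process, "remote::" = proxied
# to an external service.
providers: dict[str, list[str]] = {
    "telemetry": ["inline::meta-reference"],
    "eval": ["inline::meta-reference"],
    "datasetio": ["inline::localfs"],
    "datasets": ["remote::nvidia"],  # added by this commit
    "scoring": ["inline::basic"],
    "tool_runtime": ["inline::rag-runtime"],
}
```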
@@ -39,6 +41,12 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="remote::nvidia",
         config=NvidiaPostTrainingConfig.sample_run_config(),
     )
+
+    datasets_provider = Provider(
+        provider_id="nvidia",
+        provider_type="remote::nvidia",
+        config=NvidiaDatasetConfig.sample_run_config(),
+    )
     safety_provider = Provider(
         provider_id="nvidia",
         provider_type="remote::nvidia",
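
The new datasets_provider is constructed exactly like the post_training and safety providers around it. Serialized into a run config, the entry would reduce to a plain mapping along these lines; the provider_id and provider_type come straight from the diff, while the config sub-dict is hypothetical, carried over from the NvidiaDatasetConfig sketch above.

```python
# Approximate serialized form of the new provider entry, assuming the
# hypothetical NvidiaDatasetConfig.sample_run_config() sketched earlier.
datasets_provider_entry = {
    "provider_id": "nvidia",            # shown in the diff
    "provider_type": "remote::nvidia",  # shown in the diff
    "config": {                         # assumed placeholder values
        "datasets_url": "${env.NVIDIA_DATASETS_URL:http://nemo.test}",
        "api_key": "${env.NVIDIA_API_KEY:}",
    },
}
```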
@@ -76,6 +84,8 @@ def get_distribution_template() -> DistributionTemplate:
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider],
+                    "post_training": [post_training_provider],
+                    "datasets": [datasets_provider],
                 },
                 default_models=default_models,
                 default_tool_groups=default_tool_groups,
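
A quick way to sanity-check the wiring is to render the template and inspect its overrides. The sketch below assumes this diff edits the nvidia distribution template (the file path is not named in this view) and that the template exposes the run_configs mapping that this RunConfigSettings instance is registered into; both the import path and the attribute names are assumptions.

```python
# Usage sketch: render the template and list the overridden providers.
# Import path and attribute names are assumptions based on the hunk;
# the file being diffed is not named in this view.
from llama_stack.templates.nvidia import get_distribution_template

template = get_distribution_template()
settings = template.run_configs["run.yaml"]
for api, providers in settings.provider_overrides.items():
    # Expected after this commit: inference, post_training, datasets.
    print(api, [p.provider_id for p in providers])
```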