remove post-training provider from distribution

Ubuntu committed 2025-03-20 09:48:53 +00:00
parent f534b4c2ea
commit 19f36aadaa
5 changed files with 0 additions and 30 deletions

View file

@@ -9,7 +9,6 @@ The `llamastack/distribution-nvidia` distribution consists of the following prov
 | datasetio | `inline::localfs` |
 | eval | `inline::meta-reference` |
 | inference | `remote::nvidia` |
-| post_training | `remote::nvidia` |
 | safety | `remote::nvidia` |
 | scoring | `inline::basic` |
 | telemetry | `inline::meta-reference` |

View file

@@ -8,8 +8,6 @@ distribution_spec:
     - inline::faiss
     safety:
     - remote::nvidia
-    post_training:
-    - remote::nvidia
     agents:
     - inline::meta-reference
     telemetry:

View file

@@ -9,7 +9,6 @@ from pathlib import Path
 from llama_stack.distribution.datatypes import ModelInput, Provider, ShieldInput, ToolGroupInput
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
 from llama_stack.providers.remote.inference.nvidia.models import MODEL_ENTRIES
-from llama_stack.providers.remote.post_training.nvidia import NvidiaPostTrainingConfig
 from llama_stack.providers.remote.safety.nvidia import NVIDIASafetyConfig
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings, get_model_registry
@@ -19,7 +18,6 @@ def get_distribution_template() -> DistributionTemplate:
         "inference": ["remote::nvidia"],
         "vector_io": ["inline::faiss"],
         "safety": ["remote::nvidia"],
-        "post_training": ["remote::nvidia"],
         "agents": ["inline::meta-reference"],
         "telemetry": ["inline::meta-reference"],
         "eval": ["inline::meta-reference"],
@@ -33,12 +31,6 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="remote::nvidia",
         config=NVIDIAConfig.sample_run_config(),
     )
-    post_training_provider = Provider(
-        provider_id="nvidia",
-        provider_type="remote::nvidia",
-        config=NvidiaPostTrainingConfig.sample_run_config(),
-    )
     safety_provider = Provider(
         provider_id="nvidia",
         provider_type="remote::nvidia",
@@ -76,7 +68,6 @@ def get_distribution_template() -> DistributionTemplate:
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider],
-                    "post_training": [post_training_provider],
                 },
                 default_models=default_models,
                 default_tool_groups=default_tool_groups,
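
Taken together, the nvidia.py hunks above leave the template wiring looking roughly like the sketch below. This is reconstructed only from the context lines visible in this diff (the real get_distribution_template() contains more than is shown here); `default_models` and `default_tool_groups` are stand-in placeholders for values built elsewhere in the file, and the RunConfigSettings call may take further arguments not visible in this hunk.

```python
from llama_stack.distribution.datatypes import Provider
from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
from llama_stack.templates.template import RunConfigSettings

# Provider registry without the removed "post_training" entry.
providers = {
    "inference": ["remote::nvidia"],
    "vector_io": ["inline::faiss"],
    "safety": ["remote::nvidia"],
    "agents": ["inline::meta-reference"],
    "telemetry": ["inline::meta-reference"],
    "eval": ["inline::meta-reference"],
}

# The inference provider is still instantiated for the run.yaml override;
# the post_training_provider block is gone.
inference_provider = Provider(
    provider_id="nvidia",
    provider_type="remote::nvidia",
    config=NVIDIAConfig.sample_run_config(),
)

# Placeholders for values built elsewhere in the template (not shown in this diff).
default_models: list = []
default_tool_groups: list = []

run_config = RunConfigSettings(
    provider_overrides={"inference": [inference_provider]},
    default_models=default_models,
    default_tool_groups=default_tool_groups,
)
```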

View file

@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- post_training
 - safety
 - scoring
 - telemetry
@@ -37,14 +36,6 @@ providers:
     config:
       guardrails_service_url: ${env.GUARDRAILS_SERVICE_URL:http://localhost:7331}
       config_id: self-check
-  post_training:
-  - provider_id: nvidia
-    provider_type: remote::nvidia
-    config:
-      api_key: ${env.NVIDIA_API_KEY:}
-      dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
-      project_id: ${env.NVIDIA_PROJECT_ID:test-project}
-      customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:http://nemo.test}
   agents:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
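
The values that remain in run.yaml keep the `${env.VAR:default}` placeholder convention visible above (for example `guardrails_service_url`). As a rough, hypothetical illustration of that convention only, not llama-stack's actual resolver, such a placeholder could be expanded like this:

```python
import os
import re

# Matches ${env.NAME:default} and ${env.NAME:} (empty default), as used in the config above.
_ENV_PATTERN = re.compile(r"\$\{env\.([A-Z0-9_]+):([^}]*)\}")

def expand_env_placeholders(value: str) -> str:
    """Replace ${env.NAME:default} with os.environ['NAME'], falling back to the default."""
    def _sub(match: re.Match) -> str:
        name, default = match.group(1), match.group(2)
        return os.environ.get(name, default)
    return _ENV_PATTERN.sub(_sub, value)

# Example with the guardrails URL from the run.yaml above.
print(expand_env_placeholders("${env.GUARDRAILS_SERVICE_URL:http://localhost:7331}"))
# -> value of GUARDRAILS_SERVICE_URL if set, otherwise http://localhost:7331
```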

View file

@@ -5,7 +5,6 @@ apis:
 - datasetio
 - eval
 - inference
-- post_training
 - safety
 - scoring
 - telemetry
@@ -32,14 +31,6 @@ providers:
     config:
       guardrails_service_url: ${env.GUARDRAILS_SERVICE_URL:http://localhost:7331}
      config_id: self-check
-  post_training:
-  - provider_id: nvidia
-    provider_type: remote::nvidia
-    config:
-      api_key: ${env.NVIDIA_API_KEY:}
-      dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
-      project_id: ${env.NVIDIA_PROJECT_ID:test-project}
-      customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:http://nemo.test}
   agents:
   - provider_id: meta-reference
     provider_type: inline::meta-reference