# forked from phoenix-oss/llama-stack-mirror
#
# What does this PR do?
# We are dropping configuration via CLI flag almost entirely. If any server
# configuration has to be tweaked, it must be done through the `server` section
# in the run.yaml. This is unfortunately a breaking change for whoever was using:
#   * `--tls-*`
#   * `--disable_ipv6`
# `--port` stays around and gets special treatment since we believe it is
# common for users to change the port for quick experimentation.
# Closes: https://github.com/meta-llama/llama-stack/issues/1076
#
# Test Plan: simply do `llama stack run <config>`; nothing should break :)
#
# Signed-off-by: Sébastien Han <seb@redhat.com>
# llama-stack run.yaml for the NVIDIA distribution.
# Values of the form ${env.VAR:default} are resolved by llama-stack at load
# time: the environment variable VAR if set, otherwise the text after ':'
# (an empty default means the value may resolve to an empty string).
version: '2'
image_name: nvidia

# APIs exposed by this distribution.
apis:
- agents
- datasetio
- eval
- inference
- post_training
- safety
- scoring
- telemetry
- tool_runtime
- vector_io

# Provider backing each API. NVIDIA-hosted services are used where available;
# local inline providers (faiss, meta-reference, basic, rag-runtime) otherwise.
providers:
  inference:
  - provider_id: nvidia
    provider_type: remote::nvidia
    config:
      url: ${env.NVIDIA_BASE_URL:https://integrate.api.nvidia.com}
      api_key: ${env.NVIDIA_API_KEY:}
      append_api_version: ${env.NVIDIA_APPEND_API_VERSION:True}
  vector_io:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/faiss_store.db
  safety:
  - provider_id: nvidia
    provider_type: remote::nvidia
    config:
      guardrails_service_url: ${env.GUARDRAILS_SERVICE_URL:http://localhost:7331}
      config_id: self-check
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence_store:
        type: sqlite
        namespace: null
        db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/agents_store.db
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      service_name: ${env.OTEL_SERVICE_NAME:}
      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
      sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/trace_store.db
  eval:
  - provider_id: nvidia
    provider_type: remote::nvidia
    config:
      evaluator_url: ${env.NVIDIA_EVALUATOR_URL:http://localhost:7331}
  post_training:
  - provider_id: nvidia
    provider_type: remote::nvidia
    config:
      api_key: ${env.NVIDIA_API_KEY:}
      dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
      project_id: ${env.NVIDIA_PROJECT_ID:test-project}
      customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:http://nemo.test}
  datasetio:
  - provider_id: nvidia
    provider_type: remote::nvidia
    config:
      api_key: ${env.NVIDIA_API_KEY:}
      dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
      project_id: ${env.NVIDIA_PROJECT_ID:test-project}
      datasets_url: ${env.NVIDIA_DATASETS_URL:http://nemo.test}
  scoring:
  - provider_id: basic
    provider_type: inline::basic
    config: {}
  tool_runtime:
  - provider_id: rag-runtime
    provider_type: inline::rag-runtime
    config: {}

# Registry of resources (models, shields, ...) persisted across restarts.
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db

# Model registrations. LLM entries come in pairs: the NVIDIA-native id
# (e.g. meta/llama3-8b-instruct) and the HF-style alias
# (e.g. meta-llama/Llama-3-8B-Instruct), both mapping to the same
# provider_model_id. Embedding entries carry dimension/context metadata.
models:
- metadata: {}
  model_id: meta/llama3-8b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama3-8b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3-8B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama3-8b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama3-70b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama3-70b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3-70B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama3-70b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.1-8b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.1-8b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.1-8B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.1-8b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.1-70b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.1-70b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.1-70B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.1-70b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.1-405b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.1-405b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.1-405B-Instruct-FP8
  provider_id: nvidia
  provider_model_id: meta/llama-3.1-405b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.2-1b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-1b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.2-1B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-1b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.2-3b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-3b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.2-3B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-3b-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.2-11b-vision-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-11b-vision-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-11b-vision-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.2-90b-vision-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-90b-vision-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.2-90b-vision-instruct
  model_type: llm
- metadata: {}
  model_id: meta/llama-3.3-70b-instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.3-70b-instruct
  model_type: llm
- metadata: {}
  model_id: meta-llama/Llama-3.3-70B-Instruct
  provider_id: nvidia
  provider_model_id: meta/llama-3.3-70b-instruct
  model_type: llm
- metadata:
    embedding_dimension: 2048
    context_length: 8192
  model_id: nvidia/llama-3.2-nv-embedqa-1b-v2
  provider_id: nvidia
  provider_model_id: nvidia/llama-3.2-nv-embedqa-1b-v2
  model_type: embedding
- metadata:
    embedding_dimension: 1024
    context_length: 512
  model_id: nvidia/nv-embedqa-e5-v5
  provider_id: nvidia
  provider_model_id: nvidia/nv-embedqa-e5-v5
  model_type: embedding
- metadata:
    embedding_dimension: 4096
    context_length: 512
  model_id: nvidia/nv-embedqa-mistral-7b-v2
  provider_id: nvidia
  provider_model_id: nvidia/nv-embedqa-mistral-7b-v2
  model_type: embedding
- metadata:
    embedding_dimension: 1024
    context_length: 512
  model_id: snowflake/arctic-embed-l
  provider_id: nvidia
  provider_model_id: snowflake/arctic-embed-l
  model_type: embedding

# No resources of these kinds are pre-registered for this distribution.
shields: []
vector_dbs: []
datasets: []
scoring_fns: []
benchmarks: []

tool_groups:
- toolgroup_id: builtin::rag
  provider_id: rag-runtime

# Server settings now live here instead of CLI flags (see header note).
server:
  port: 8321
  disable_ipv6: false