# Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
# What does this PR do?
# * Provides a SQLite implementation of the APIs introduced in
#   https://github.com/meta-llama/llama-stack/pull/2145.
# * Introduces a SqlStore API (llama_stack/providers/utils/sqlstore/api.py) and the
#   first SQLite implementation.
# * Pagination support will be added in a future PR.
#
# Test Plan:
# Unit test on the SQL store, plus integration test:
#   INFERENCE_MODEL="llama3.2:3b-instruct-fp16" llama stack build --template ollama --image-type conda --run
#   LLAMA_STACK_CONFIG=http://localhost:5001 INFERENCE_MODEL="llama3.2:3b-instruct-fp16" \
#     python -m pytest -v tests/integration/inference/test_openai_completion.py \
#     --text-model "llama3.2:3b-instruct-fp16" -k 'inference_store and openai'
# 224 lines · 6.3 KiB · YAML
version: '2'
|
|
image_name: nvidia
|
|
apis:
|
|
- agents
|
|
- datasetio
|
|
- eval
|
|
- inference
|
|
- post_training
|
|
- safety
|
|
- scoring
|
|
- telemetry
|
|
- tool_runtime
|
|
- vector_io
|
|
providers:
|
|
inference:
|
|
- provider_id: nvidia
|
|
provider_type: remote::nvidia
|
|
config:
|
|
url: ${env.NVIDIA_BASE_URL:https://integrate.api.nvidia.com}
|
|
api_key: ${env.NVIDIA_API_KEY:}
|
|
append_api_version: ${env.NVIDIA_APPEND_API_VERSION:True}
|
|
vector_io:
|
|
- provider_id: faiss
|
|
provider_type: inline::faiss
|
|
config:
|
|
kvstore:
|
|
type: sqlite
|
|
namespace: null
|
|
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/faiss_store.db
|
|
safety:
|
|
- provider_id: nvidia
|
|
provider_type: remote::nvidia
|
|
config:
|
|
guardrails_service_url: ${env.GUARDRAILS_SERVICE_URL:http://localhost:7331}
|
|
config_id: self-check
|
|
agents:
|
|
- provider_id: meta-reference
|
|
provider_type: inline::meta-reference
|
|
config:
|
|
persistence_store:
|
|
type: sqlite
|
|
namespace: null
|
|
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/agents_store.db
|
|
telemetry:
|
|
- provider_id: meta-reference
|
|
provider_type: inline::meta-reference
|
|
config:
|
|
service_name: ${env.OTEL_SERVICE_NAME:}
|
|
sinks: ${env.TELEMETRY_SINKS:console,sqlite}
|
|
sqlite_db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/trace_store.db
|
|
eval:
|
|
- provider_id: nvidia
|
|
provider_type: remote::nvidia
|
|
config:
|
|
evaluator_url: ${env.NVIDIA_EVALUATOR_URL:http://localhost:7331}
|
|
post_training:
|
|
- provider_id: nvidia
|
|
provider_type: remote::nvidia
|
|
config:
|
|
api_key: ${env.NVIDIA_API_KEY:}
|
|
dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
|
|
project_id: ${env.NVIDIA_PROJECT_ID:test-project}
|
|
customizer_url: ${env.NVIDIA_CUSTOMIZER_URL:http://nemo.test}
|
|
datasetio:
|
|
- provider_id: nvidia
|
|
provider_type: remote::nvidia
|
|
config:
|
|
api_key: ${env.NVIDIA_API_KEY:}
|
|
dataset_namespace: ${env.NVIDIA_DATASET_NAMESPACE:default}
|
|
project_id: ${env.NVIDIA_PROJECT_ID:test-project}
|
|
datasets_url: ${env.NVIDIA_DATASETS_URL:http://nemo.test}
|
|
scoring:
|
|
- provider_id: basic
|
|
provider_type: inline::basic
|
|
config: {}
|
|
tool_runtime:
|
|
- provider_id: rag-runtime
|
|
provider_type: inline::rag-runtime
|
|
config: {}
|
|
metadata_store:
|
|
type: sqlite
|
|
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db
|
|
inference_store:
|
|
type: sqlite
|
|
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/inference_store.db
|
|
models:
|
|
- metadata: {}
|
|
model_id: meta/llama3-8b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama3-8b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3-8B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama3-8b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama3-70b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama3-70b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3-70B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama3-70b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.1-8b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.1-8b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.1-8B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.1-8b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.1-70b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.1-70b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.1-70B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.1-70b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.1-405b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.1-405b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.1-405B-Instruct-FP8
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.1-405b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.2-1b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-1b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.2-1B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-1b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.2-3b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-3b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.2-3B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-3b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.2-11b-vision-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-11b-vision-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.2-11B-Vision-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-11b-vision-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.2-90b-vision-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-90b-vision-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.2-90B-Vision-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.2-90b-vision-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta/llama-3.3-70b-instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.3-70b-instruct
|
|
model_type: llm
|
|
- metadata: {}
|
|
model_id: meta-llama/Llama-3.3-70B-Instruct
|
|
provider_id: nvidia
|
|
provider_model_id: meta/llama-3.3-70b-instruct
|
|
model_type: llm
|
|
- metadata:
|
|
embedding_dimension: 2048
|
|
context_length: 8192
|
|
model_id: nvidia/llama-3.2-nv-embedqa-1b-v2
|
|
provider_id: nvidia
|
|
provider_model_id: nvidia/llama-3.2-nv-embedqa-1b-v2
|
|
model_type: embedding
|
|
- metadata:
|
|
embedding_dimension: 1024
|
|
context_length: 512
|
|
model_id: nvidia/nv-embedqa-e5-v5
|
|
provider_id: nvidia
|
|
provider_model_id: nvidia/nv-embedqa-e5-v5
|
|
model_type: embedding
|
|
- metadata:
|
|
embedding_dimension: 4096
|
|
context_length: 512
|
|
model_id: nvidia/nv-embedqa-mistral-7b-v2
|
|
provider_id: nvidia
|
|
provider_model_id: nvidia/nv-embedqa-mistral-7b-v2
|
|
model_type: embedding
|
|
- metadata:
|
|
embedding_dimension: 1024
|
|
context_length: 512
|
|
model_id: snowflake/arctic-embed-l
|
|
provider_id: nvidia
|
|
provider_model_id: snowflake/arctic-embed-l
|
|
model_type: embedding
|
|
shields: []
|
|
vector_dbs: []
|
|
datasets: []
|
|
scoring_fns: []
|
|
benchmarks: []
|
|
tool_groups:
|
|
- toolgroup_id: builtin::rag
|
|
provider_id: rag-runtime
|
|
server:
|
|
port: 8321
|