distribution_type: local-plus-tgi-inference
description: Use TGI (locally, or via Hugging Face Inference Endpoints) for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint.
docker_image: null
providers:
  inference: remote::tgi
  safety: meta-reference
  agentic_system: meta-reference
  memory: meta-reference-faiss
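To use this distribution, the remote::tgi inference provider needs to be pointed at a running TGI server (local, or a Hugging Face Inference Endpoint). The sketch below is purely illustrative; the field names and layout are assumptions for illustration, not the exact provider schema defined in the repo.

# Illustrative sketch only -- field names here are assumptions, not the actual schema.
inference:
  provider_type: remote::tgi
  config:
    # URL of a locally running TGI server, or of a Hugging Face
    # Inference Endpoint (whose name must be provided, per the description above).
    url: http://127.0.0.1:8080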