llama-stack-mirror/tests/examples/inference-run.yaml
2024-10-23 15:22:03 -07:00

14 lines
255 B
YAML

---
# Example llama-stack run configuration: exposes the models and
# inference APIs, backed by a remote TGI (Text Generation Inference)
# server — see provider_type `remote::tgi` below.
version: '2'
built_at: '2024-10-08T17:40:45.325529'
image_name: local
docker_image: null
conda_env: local

# APIs this stack serves.
apis:
- models
- inference

# Provider backing each API. The inference API is routed to a single
# TGI provider instance (`tgi0`).
providers:
  inference:
  - provider_id: tgi0
    provider_type: remote::tgi
    config:
      # Endpoint of the running TGI server — adjust to your deployment.
      url: http://127.0.0.1:5009