CLI: add build templates support, move imports (#77)
* list templates implementation
* relative path
* finalize templates
* remove imports
* remove templates from name, name templates
* fix docker
* fix docker
commit 1128f69674
parent 6b21523c28
9 changed files with 130 additions and 37 deletions
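The commit notes mention a "list templates" implementation that resolves the templates directory via a relative path. A minimal sketch of how that discovery could work, assuming the templates ship inside the package next to the CLI code (the function names and path layout below are illustrative, not the project's actual API):

```python
from pathlib import Path

# Assumption: templates live under llama_stack/distribution/templates/,
# resolved relative to the installed package rather than the working directory.
TEMPLATES_DIR = Path(__file__).parent / "distribution" / "templates"

def list_build_templates() -> list[Path]:
    """Return all *-build.yaml template files shipped with the package."""
    return sorted(TEMPLATES_DIR.glob("*-build.yaml"))

def template_names() -> list[str]:
    """Strip the '-build.yaml' suffix to get user-facing template names, e.g. 'local-ollama'."""
    return [p.name[: -len("-build.yaml")] for p in list_build_templates()]
```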
llama_stack/distribution/templates/local-build.yaml (new file, +10)
@@ -0,0 +1,10 @@
name: local
distribution_spec:
  description: Use code from `llama_stack` itself to serve all llama stack APIs
  providers:
    inference: meta-reference
    memory: meta-reference
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
image_type: conda
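Each template follows the same shape: a top-level name, a distribution_spec carrying a description and an API-to-provider mapping, and an image_type. A minimal sketch of parsing one of these files, assuming PyYAML is available (the dataclass names are illustrative and not necessarily the project's real datatypes):

```python
from dataclasses import dataclass
from typing import Dict

import yaml  # assumption: PyYAML is available

@dataclass
class DistributionSpec:
    description: str
    providers: Dict[str, str]  # API name -> provider id, e.g. "inference" -> "meta-reference"

@dataclass
class BuildTemplate:
    name: str
    distribution_spec: DistributionSpec
    image_type: str  # "conda" in every template in this commit

def load_template(path: str) -> BuildTemplate:
    """Parse a *-build.yaml template into plain dataclasses."""
    with open(path) as f:
        raw = yaml.safe_load(f)
    return BuildTemplate(
        name=raw["name"],
        distribution_spec=DistributionSpec(**raw["distribution_spec"]),
        image_type=raw["image_type"],
    )
```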
llama_stack/distribution/templates/local-fireworks-build.yaml (new file, +10)
@@ -0,0 +1,10 @@
name: local-fireworks
distribution_spec:
  description: Use Fireworks.ai for running LLM inference
  providers:
    inference: remote::fireworks
    memory: meta-reference
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
image_type: conda
llama_stack/distribution/templates/local-ollama-build.yaml (new file, +10)
@@ -0,0 +1,10 @@
name: local-ollama
distribution_spec:
  description: Like local, but use ollama for running LLM inference
  providers:
    inference: remote::ollama
    memory: meta-reference
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
image_type: conda
llama_stack/distribution/templates/local-tgi-build.yaml (new file, +10)
@@ -0,0 +1,10 @@
name: local-tgi
distribution_spec:
  description: Use TGI (local or with Hugging Face Inference Endpoints) for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint.
  providers:
    inference: remote::tgi
    memory: meta-reference
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
image_type: conda
llama_stack/distribution/templates/local-together-build.yaml (new file, +10)
@@ -0,0 +1,10 @@
name: local-together
distribution_spec:
  description: Use Together.ai for running LLM inference
  providers:
    inference: remote::together
    memory: meta-reference
    safety: meta-reference
    agents: meta-reference
    telemetry: meta-reference
image_type: conda
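The remote templates differ from local only in the inference provider: `remote::fireworks`, `remote::ollama`, `remote::tgi`, and `remote::together` point at an external inference service, while `meta-reference` providers run in-process. A small, hypothetical helper that splits a template's providers along that line (the `remote::` prefix convention is taken directly from the files above):

```python
def split_providers(providers: dict[str, str]) -> tuple[dict[str, str], dict[str, str]]:
    """Separate remote providers (remote::<adapter>) from in-process ones."""
    remote = {api: p for api, p in providers.items() if p.startswith("remote::")}
    local = {api: p for api, p in providers.items() if not p.startswith("remote::")}
    return remote, local

# Example with the local-ollama template's providers:
remote, local = split_providers({
    "inference": "remote::ollama",
    "memory": "meta-reference",
    "safety": "meta-reference",
    "agents": "meta-reference",
    "telemetry": "meta-reference",
})
# remote == {"inference": "remote::ollama"}; everything else stays in-process.
```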