Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 02:53:30 +00:00
fix broken --list-templates with adding build.yaml files for packaging (#327)
* add build files to templates
* fix templates
* manifest
* symlink
* symlink
* precommit
* change everything to docker build.yaml
* remove image_type in templates
* fix build from templates CLI
* fix readmes
parent afae4e3d8e
commit 07f9bf723f
32 changed files with 161 additions and 158 deletions
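Each template now carries its own build.yaml, which is what un-breaks --list-templates: instead of relying on specs baked into the package, the CLI can enumerate and parse these files. Below is a minimal sketch of that discovery step; the helper names, paths, and output format are assumptions for illustration, not the actual CLI implementation in llama_stack.

# Hypothetical sketch of how `--list-templates` could enumerate the new
# build.yaml files. Names and layout here are assumptions, not the real code.
from pathlib import Path

import yaml  # PyYAML

TEMPLATES_DIR = Path("llama_stack/templates")  # assumed repo-relative location

def list_templates(templates_dir: Path = TEMPLATES_DIR) -> list[dict]:
    """Collect (name, description) pairs from every templates/*/build.yaml."""
    templates = []
    for build_file in sorted(templates_dir.glob("*/build.yaml")):
        config = yaml.safe_load(build_file.read_text())
        templates.append(
            {
                "name": config["name"],
                "description": config["distribution_spec"]["description"],
            }
        )
    return templates

if __name__ == "__main__":
    for t in list_templates():
        print(f"{t['name']:30} {t['description']}")

With the files below in place, `llama stack build --list-templates` (the flag named in the commit title) has concrete data to print, and a template name can be fed back into the build flow.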
llama_stack/templates/bedrock/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: bedrock
+distribution_spec:
+  description: Use Amazon Bedrock APIs.
+  providers:
+    inference: remote::bedrock
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
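For reference, the shape shared by all of these files can be captured in a small typed structure. The dataclasses below are purely illustrative, with assumed names; llama_stack parses these files with its own config models, which may differ.

# Illustrative schema for build.yaml; field names mirror the YAML above,
# but these dataclasses are a sketch, not llama_stack's actual datatypes.
from dataclasses import dataclass, field
from typing import Optional, Union

@dataclass
class DistributionSpec:
    description: str
    # Each API maps to a single provider id, or to a list of candidates
    # (see the memory entries in the GPU/ollama/tgi templates below).
    providers: dict[str, Union[str, list[str]]] = field(default_factory=dict)
    docker_image: Optional[str] = None  # only some templates set this

@dataclass
class BuildConfig:
    name: str
    distribution_spec: DistributionSpec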
llama_stack/templates/databricks/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: databricks
+distribution_spec:
+  description: Use Databricks for running LLM inference
+  providers:
+    inference: remote::databricks
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
llama_stack/templates/fireworks/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: fireworks
+distribution_spec:
+  description: Use Fireworks.ai for running LLM inference
+  providers:
+    inference: remote::fireworks
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
llama_stack/templates/hf-endpoint/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: hf-endpoint
+distribution_spec:
+  description: "Like local, but use Hugging Face Inference Endpoints for running LLM inference.\nSee https://hf.co/docs/api-endpoints."
+  providers:
+    inference: remote::hf::endpoint
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
llama_stack/templates/hf-serverless/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: hf-serverless
+distribution_spec:
+  description: "Like local, but use Hugging Face Inference API (serverless) for running LLM inference.\nSee https://hf.co/docs/api-inference."
+  providers:
+    inference: remote::hf::serverless
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
llama_stack/templates/meta-reference-gpu/build.yaml (new file, 13 additions)
@@ -0,0 +1,13 @@
+name: meta-reference-gpu
+distribution_spec:
+  docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
+  description: Use code from `llama_stack` itself to serve all llama stack APIs
+  providers:
+    inference: meta-reference
+    memory:
+      - meta-reference
+      - remote::chromadb
+      - remote::pgvector
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
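Note that memory in this template is a list of acceptable providers, while the simpler templates use a single string; any consumer of these files has to accept both shapes. A small normalization sketch follows (illustrative only, not the project's actual handling):

# Sketch: normalize a provider entry that may be a string or a list of strings.
from typing import Union

def provider_choices(entry: Union[str, list[str]]) -> list[str]:
    """Return provider candidates as a list, whether YAML gave one or many."""
    return [entry] if isinstance(entry, str) else list(entry)

assert provider_choices("remote::bedrock") == ["remote::bedrock"]
assert provider_choices(["meta-reference", "remote::chromadb"]) == [
    "meta-reference",
    "remote::chromadb",
]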
llama_stack/templates/meta-reference-quantized-gpu/build.yaml (new file, 13 additions)
@@ -0,0 +1,13 @@
+name: meta-reference-quantized-gpu
+distribution_spec:
+  docker_image: pytorch/pytorch:2.5.0-cuda12.4-cudnn9-runtime
+  description: Use code from `llama_stack` itself to serve all llama stack APIs
+  providers:
+    inference: meta-reference-quantized
+    memory:
+      - meta-reference
+      - remote::chromadb
+      - remote::pgvector
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
12
llama_stack/templates/ollama/build.yaml
Normal file
12
llama_stack/templates/ollama/build.yaml
Normal file
|
@ -0,0 +1,12 @@
|
|||
name: ollama
|
||||
distribution_spec:
|
||||
description: Use ollama for running LLM inference
|
||||
providers:
|
||||
inference: remote::ollama
|
||||
memory:
|
||||
- meta-reference
|
||||
- remote::chromadb
|
||||
- remote::pgvector
|
||||
safety: meta-reference
|
||||
agents: meta-reference
|
||||
telemetry: meta-reference
|
llama_stack/templates/tgi/build.yaml (new file, 12 additions)
@@ -0,0 +1,12 @@
+name: tgi
+distribution_spec:
+  description: Use TGI for running LLM inference
+  providers:
+    inference: remote::tgi
+    memory:
+      - meta-reference
+      - remote::chromadb
+      - remote::pgvector
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
llama_stack/templates/together/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: together
+distribution_spec:
+  description: Use Together.ai for running LLM inference
+  providers:
+    inference: remote::together
+    memory: remote::weaviate
+    safety: remote::together
+    agents: meta-reference
+    telemetry: meta-reference
llama_stack/templates/vllm/build.yaml (new file, 9 additions)
@@ -0,0 +1,9 @@
+name: vllm
+distribution_spec:
+  description: Like local, but use vLLM for running LLM inference
+  providers:
+    inference: vllm
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference