Mirror of https://github.com/meta-llama/llama-stack.git
address nits, remove docker_image=null
parent 037212c7b1
commit 13871d5a5b
13 changed files with 6 additions and 16 deletions
@@ -1,7 +1,6 @@
 name: local-conda-example
 distribution_spec:
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
-  docker_image: null
   providers:
     inference: meta-reference
     memory: meta-reference-faiss

@@ -1,7 +1,6 @@
 name: local-fireworks-conda-example
 distribution_spec:
   description: Use Fireworks.ai for running LLM inference
-  docker_image: null
   providers:
     inference: remote::fireworks
     memory: meta-reference-faiss

@@ -1,7 +1,6 @@
 name: local-ollama-conda-example
 distribution_spec:
   description: Like local, but use ollama for running LLM inference
-  docker_image: null
   providers:
     inference: remote::ollama
     memory: meta-reference-faiss

@@ -1,7 +1,6 @@
 name: local-tgi-conda-example
 distribution_spec:
   description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
-  docker_image: null
   providers:
     inference: remote::tgi
     memory: meta-reference-faiss

@@ -1,7 +1,6 @@
 name: local-tgi-conda-example
 distribution_spec:
   description: Use Together.ai for running LLM inference
-  docker_image: null
   providers:
     inference: remote::together
     memory: meta-reference-faiss

@@ -1,5 +1,4 @@
 description: Like local, but use ollama for running LLM inference
-docker_image: null
 providers:
   inference: remote::ollama
   safety: meta-reference

@@ -1,5 +1,4 @@
 description: Use Fireworks.ai for running LLM inference
-docker_image: null
 providers:
   inference: remote::fireworks
   safety: meta-reference

@@ -1,5 +1,4 @@
 description: Use TGI (local or with Hugging Face Inference Endpoints for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint).
-docker_image: null
 providers:
   inference: remote::tgi
   safety: meta-reference

@@ -1,5 +1,4 @@
 description: Use Together.ai for running LLM inference
-docker_image: null
 providers:
   inference: remote::together
   safety: meta-reference

@@ -1,5 +1,4 @@
 description: Use code from `llama_toolchain` itself to serve all llama stack APIs
-docker_image: null
 providers:
   inference: meta-reference
   memory: meta-reference-faiss

@@ -1,7 +1,6 @@
 name: local-docker-example
 distribution_spec:
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
-  docker_image: null
   providers:
     inference: meta-reference
     memory: meta-reference-faiss
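
Every hunk above deletes the same line: an explicit docker_image: null entry in a distribution spec YAML file. A minimal sketch of why that line is redundant, assuming the spec is parsed into a pydantic model whose docker_image field already defaults to None (the model below is illustrative, not the exact definition from llama_toolchain):

from typing import Dict, Optional

import yaml  # PyYAML
from pydantic import BaseModel, Field


# Illustrative stand-in for the real DistributionSpec in llama_toolchain;
# field names mirror the YAML keys in the hunks above.
class DistributionSpec(BaseModel):
    description: str
    # Optional with a None default, so `docker_image: null` adds nothing.
    docker_image: Optional[str] = None
    providers: Dict[str, str] = Field(default_factory=dict)


with_null = yaml.safe_load("""
description: demo
docker_image: null
providers:
  inference: meta-reference
""")
without_key = yaml.safe_load("""
description: demo
providers:
  inference: meta-reference
""")

# Both documents validate to the same model: docker_image is None either way.
assert DistributionSpec(**with_null) == DistributionSpec(**without_key)

The equivalence holds as long as the loader treats a null value and an absent key the same way, which optional-with-default fields do; that is what makes dropping the explicit nulls a pure cleanup.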