move distribution to yaml files

Xi Yan 2024-09-14 15:02:22 -07:00
parent 3802d5acdc
commit 0068d059db
9 changed files with 78 additions and 49 deletions

@@ -0,0 +1,9 @@
distribution_type: local-ollama
description: Like local, but use Ollama for running LLM inference
docker_image: null
providers:
inference: remote::ollama
safety: meta-reference
agentic_system: meta-reference
memory: meta-reference-faiss
telemetry: console
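
Each registry entry in this commit follows the same shape: a unique distribution_type, a human-readable description, an optional docker_image, and a providers map from API name to provider type. As a rough sketch of how one of these files could be deserialized, assuming a hypothetical DistributionSpec dataclass and file path (the toolchain's actual types and locations may differ):

    from dataclasses import dataclass, field
    from typing import Dict, Optional

    import yaml  # pyyaml


    @dataclass
    class DistributionSpec:
        # Field names mirror the YAML keys above.
        distribution_type: str
        description: str
        docker_image: Optional[str] = None
        providers: Dict[str, str] = field(default_factory=dict)


    def load_spec(path: str) -> DistributionSpec:
        # safe_load maps `docker_image: null` to Python None.
        with open(path) as f:
            return DistributionSpec(**yaml.safe_load(f))


    # Hypothetical path; the files' actual location in the repo may differ.
    spec = load_spec("distributions/local-ollama.yaml")
    assert spec.providers["inference"] == "remote::ollama"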

@@ -0,0 +1,9 @@
distribution_type: local-plus-fireworks-inference
description: Use Fireworks.ai for running LLM inference
docker_image: null
providers:
inference: remote::fireworks
safety: meta-reference
agentic_system: meta-reference
memory: meta-reference-faiss
telemetry: console

@@ -0,0 +1,8 @@
distribution_type: local-plus-tgi-inference
description: Use TGI (local or with Hugging Face Inference Endpoints) for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint.
docker_image: null
providers:
inference: remote::tgi
safety: meta-reference
agentic_system: meta-reference
memory: meta-reference-faiss

@@ -0,0 +1,9 @@
distribution_type: local-plus-together-inference
description: Use Together.ai for running LLM inference
docker_image: null
providers:
inference: remote::together
safety: meta-reference
agentic_system: meta-reference
memory: meta-reference-faiss
telemetry: console
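
Across these entries, the remote:: prefix marks providers that proxy an external service (ollama, fireworks, tgi, together), while bare names like meta-reference run in-process. A small helper for splitting that prefix, on the assumption that this naming convention is the only contract:

    def parse_provider(provider_type: str) -> tuple[bool, str]:
        # 'remote::ollama' -> (True, 'ollama'); 'meta-reference' -> (False, 'meta-reference')
        if provider_type.startswith("remote::"):
            return True, provider_type.split("::", 1)[1]
        return False, provider_type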

@@ -0,0 +1,9 @@
distribution_type: local
description: Use code from `llama_toolchain` itself to serve all Llama Stack APIs
docker_image: null
providers:
inference: meta-reference
memory: meta-reference-faiss
safety: meta-reference
agentic_system: meta-reference
telemetry: console
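
With the specs stored as individual YAML files, the registry itself can be rebuilt by scanning a directory instead of hardcoding Python objects. A sketch of that lookup, reusing the hypothetical load_spec helper from above and an assumed distributions/ directory name:

    from pathlib import Path


    def available_distribution_specs(root: str = "distributions") -> dict[str, DistributionSpec]:
        # Map distribution_type -> spec for every YAML file under `root`.
        specs = {}
        for path in sorted(Path(root).glob("*.yaml")):
            spec = load_spec(str(path))
            specs[spec.distribution_type] = spec
        return specs


    registry = available_distribution_specs()
    print(registry["local"].providers)  # e.g. {'inference': 'meta-reference', ...}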