remove distribution types!

Author: Xi Yan
Date:   2024-09-16 10:12:45 -07:00
Parent: e466ec389b
Commit: c0c5839361

17 changed files with 17 additions and 98 deletions
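
For context, every spec below keeps its shape minus the `distribution_type` key, so a distribution is presumably identified by its `name` alone after this change. A minimal sketch of one resulting build config, assuming only the fields visible in the hunks (the provider entry is illustrative; the diffs truncate at `providers:`, so the exact keys and values are assumptions):

name: local-conda-example
distribution_spec:
  # distribution_type is removed by this commit
  description: Use code from `llama_toolchain` itself to serve all llama stack APIs
  docker_image: null
  providers:
    inference: meta-reference  # assumed mapping; not shown in the truncated hunks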

@@ -1,6 +1,5 @@
 name: local-conda-example
 distribution_spec:
-  distribution_type: local
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-fireworks-conda-example
 distribution_spec:
-  distribution_type: local-plus-fireworks-inference
   description: Use Fireworks.ai for running LLM inference
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-ollama-conda-example
 distribution_spec:
-  distribution_type: local-plus-ollama-inference
   description: Like local, but use ollama for running LLM inference
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-tgi-conda-example
 distribution_spec:
-  distribution_type: local-plus-tgi-inference
   description: Use TGI (local or with Hugging Face Inference Endpoints) for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint.
   docker_image: null
   providers:

@@ -1,6 +1,5 @@
 name: local-together-conda-example
 distribution_spec:
-  distribution_type: local-plus-together-inference
   description: Use Together.ai for running LLM inference
   docker_image: null
   providers:

@@ -1,4 +1,3 @@
-distribution_type: local-ollama
 description: Like local, but use ollama for running LLM inference
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local-plus-fireworks-inference
 description: Use Fireworks.ai for running LLM inference
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local-plus-tgi-inference
 description: Use TGI (local or with Hugging Face Inference Endpoints) for running LLM inference. When using HF Inference Endpoints, you must provide the name of the endpoint.
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local-plus-together-inference
 description: Use Together.ai for running LLM inference
 docker_image: null
 providers:

@@ -1,4 +1,3 @@
-distribution_type: local
 description: Use code from `llama_toolchain` itself to serve all llama stack APIs
 docker_image: null
 providers:

@@ -1,6 +1,5 @@
 name: local-docker-example
 distribution_spec:
-  distribution_type: local
   description: Use code from `llama_toolchain` itself to serve all llama stack APIs
   docker_image: null
   providers: