diff --git a/llama_stack/distribution/templates/build_configs/local-bedrock-conda-example-build.yaml b/llama_stack/distribution/templates/build_configs/local-bedrock-conda-example-build.yaml
deleted file mode 100644
index 50d5e7048..000000000
--- a/llama_stack/distribution/templates/build_configs/local-bedrock-conda-example-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-bedrock-conda-example
-distribution_spec:
-  description: Use Amazon Bedrock APIs.
-  providers:
-    inference: remote::bedrock
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-cpu-docker-build.yaml b/llama_stack/distribution/templates/build_configs/local-cpu-docker-build.yaml
deleted file mode 100644
index 9db019454..000000000
--- a/llama_stack/distribution/templates/build_configs/local-cpu-docker-build.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-name: local-cpu
-distribution_spec:
-  description: remote inference + local safety/agents/memory
-  docker_image: null
-  providers:
-    inference:
-    - remote::ollama
-    - remote::tgi
-    - remote::together
-    - remote::fireworks
-    safety: meta-reference
-    agents: meta-reference
-    memory: meta-reference
-    telemetry: meta-reference
-image_type: docker
diff --git a/llama_stack/distribution/templates/build_configs/local-databricks-build.yaml b/llama_stack/distribution/templates/build_configs/local-databricks-build.yaml
deleted file mode 100644
index 754af7668..000000000
--- a/llama_stack/distribution/templates/build_configs/local-databricks-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-databricks
-distribution_spec:
-  description: Use Databricks for running LLM inference
-  providers:
-    inference: remote::databricks
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
\ No newline at end of file
diff --git a/llama_stack/distribution/templates/build_configs/local-fireworks-build.yaml b/llama_stack/distribution/templates/build_configs/local-fireworks-build.yaml
deleted file mode 100644
index 33bdee3b5..000000000
--- a/llama_stack/distribution/templates/build_configs/local-fireworks-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-fireworks
-distribution_spec:
-  description: Use Fireworks.ai for running LLM inference
-  providers:
-    inference: remote::fireworks
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-gpu-docker-build.yaml b/llama_stack/distribution/templates/build_configs/local-gpu-docker-build.yaml
deleted file mode 100644
index 01af1021e..000000000
--- a/llama_stack/distribution/templates/build_configs/local-gpu-docker-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-gpu
-distribution_spec:
-  description: Use code from `llama_stack` itself to serve all llama stack APIs
-  providers:
-    inference: meta-reference
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: docker
diff --git a/llama_stack/distribution/templates/build_configs/local-hf-endpoint-build.yaml b/llama_stack/distribution/templates/build_configs/local-hf-endpoint-build.yaml
deleted file mode 100644
index e5c4ae8cc..000000000
--- a/llama_stack/distribution/templates/build_configs/local-hf-endpoint-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-hf-endpoint
-distribution_spec:
-  description: "Like local, but use Hugging Face Inference Endpoints for running LLM inference.\nSee https://hf.co/docs/api-endpoints."
-  providers:
-    inference: remote::hf::endpoint
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-hf-serverless-build.yaml b/llama_stack/distribution/templates/build_configs/local-hf-serverless-build.yaml
deleted file mode 100644
index 752390b40..000000000
--- a/llama_stack/distribution/templates/build_configs/local-hf-serverless-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-hf-serverless
-distribution_spec:
-  description: "Like local, but use Hugging Face Inference API (serverless) for running LLM inference.\nSee https://hf.co/docs/api-inference."
-  providers:
-    inference: remote::hf::serverless
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-ollama-build.yaml b/llama_stack/distribution/templates/build_configs/local-ollama-build.yaml
deleted file mode 100644
index e89b19d9d..000000000
--- a/llama_stack/distribution/templates/build_configs/local-ollama-build.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: local-ollama
-distribution_spec:
-  description: Like local, but use ollama for running LLM inference
-  providers:
-    inference: remote::ollama
-    memory:
-    - meta-reference
-    - remote::chromadb
-    - remote::pgvector
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-tgi-build.yaml b/llama_stack/distribution/templates/build_configs/local-tgi-build.yaml
deleted file mode 100644
index d4752539d..000000000
--- a/llama_stack/distribution/templates/build_configs/local-tgi-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-tgi
-distribution_spec:
-  description: Like local, but use a TGI server for running LLM inference.
-  providers:
-    inference: remote::tgi
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-tgi-chroma-docker-build.yaml b/llama_stack/distribution/templates/build_configs/local-tgi-chroma-docker-build.yaml
deleted file mode 100644
index 30715c551..000000000
--- a/llama_stack/distribution/templates/build_configs/local-tgi-chroma-docker-build.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: local-tgi-chroma
-distribution_spec:
-  description: remote tgi inference + chromadb memory
-  docker_image: null
-  providers:
-    inference: remote::tgi
-    safety: meta-reference
-    agents: meta-reference
-    memory: remote::chromadb
-    telemetry: meta-reference
-image_type: docker
diff --git a/llama_stack/distribution/templates/build_configs/local-together-build.yaml b/llama_stack/distribution/templates/build_configs/local-together-build.yaml
deleted file mode 100644
index ebf0bf1fb..000000000
--- a/llama_stack/distribution/templates/build_configs/local-together-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-together
-distribution_spec:
-  description: Use Together.ai for running LLM inference
-  providers:
-    inference: remote::together
-    memory: meta-reference
-    safety: remote::together
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/build_configs/local-vllm-build.yaml b/llama_stack/distribution/templates/build_configs/local-vllm-build.yaml
deleted file mode 100644
index e907cb7c9..000000000
--- a/llama_stack/distribution/templates/build_configs/local-vllm-build.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: local-vllm
-distribution_spec:
-  description: Like local, but use vLLM for running LLM inference
-  providers:
-    inference: vllm
-    memory: meta-reference
-    safety: meta-reference
-    agents: meta-reference
-    telemetry: meta-reference
-image_type: conda
diff --git a/llama_stack/distribution/templates/run_configs/local-run.yaml b/llama_stack/distribution/templates/run_configs/local-run.yaml
deleted file mode 100644
index 7abf2b4dc..000000000
--- a/llama_stack/distribution/templates/run_configs/local-run.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-version: '2'
-built_at: '2024-10-08T17:40:45.325529'
-image_name: local
-docker_image: null
-conda_env: local
-apis:
-- shields
-- agents
-- models
-- memory
-- memory_banks
-- inference
-- safety
-providers:
-  inference:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      model: Llama3.1-8B-Instruct
-      quantization: null
-      torch_seed: null
-      max_seq_len: 4096
-      max_batch_size: 1
-  safety:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      llama_guard_shield:
-        model: Llama-Guard-3-1B
-        excluded_categories: []
-        disable_input_check: false
-        disable_output_check: false
-      prompt_guard_shield:
-        model: Prompt-Guard-86M
-  memory:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config: {}
-  agents:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      persistence_store:
-        namespace: null
-        type: sqlite
-        db_path: ~/.llama/runtime/kvstore.db
-  telemetry:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config: {}
diff --git a/llama_stack/distribution/templates/run_configs/local-tgi-chroma-run.yaml b/llama_stack/distribution/templates/run_configs/local-tgi-chroma-run.yaml
deleted file mode 100644
index e86ea2722..000000000
--- a/llama_stack/distribution/templates/run_configs/local-tgi-chroma-run.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-version: '2'
-built_at: '2024-10-08T17:40:45.325529'
-image_name: local
-docker_image: null
-conda_env: local
-apis:
-- shields
-- agents
-- models
-- memory
-- memory_banks
-- inference
-- safety
-providers:
-  inference:
-  - provider_id: tgi0
-    provider_type: remote::tgi
-    config:
-      url: http://127.0.0.1:5009
-  safety:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      llama_guard_shield:
-        model: Llama-Guard-3-1B
-        excluded_categories: []
-        disable_input_check: false
-        disable_output_check: false
-      prompt_guard_shield:
-        model: Prompt-Guard-86M
-  memory:
-  - provider_id: chroma0
-    provider_type: remote::chromadb
-    config:
-      host: localhost
-      port: 6000
-  agents:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      persistence_store:
-        namespace: null
-        type: sqlite
-        db_path: ~/.llama/runtime/kvstore.db
-  telemetry:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config: {}
diff --git a/llama_stack/distribution/templates/run_configs/local-tgi-run.yaml b/llama_stack/distribution/templates/run_configs/local-tgi-run.yaml
deleted file mode 100644
index ec3af742c..000000000
--- a/llama_stack/distribution/templates/run_configs/local-tgi-run.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-version: '2'
-built_at: '2024-10-08T17:40:45.325529'
-image_name: local
-docker_image: null
-conda_env: local
-apis:
-- shields
-- agents
-- models
-- memory
-- memory_banks
-- inference
-- safety
-providers:
-  inference:
-  - provider_id: tgi0
-    provider_type: remote::tgi
-    config:
-      url: http://127.0.0.1:5009
-  safety:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      llama_guard_shield:
-        model: Llama-Guard-3-1B
-        excluded_categories: []
-        disable_input_check: false
-        disable_output_check: false
-      prompt_guard_shield:
-        model: Prompt-Guard-86M
-  memory:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config: {}
-  agents:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config:
-      persistence_store:
-        namespace: null
-        type: sqlite
-        db_path: ~/.llama/runtime/kvstore.db
-  telemetry:
-  - provider_id: meta-reference
-    provider_type: meta-reference
-    config: {}