From ca2e7f52bdf49b075fcbba02f5c21ec0cced6d25 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Mon, 21 Oct 2024 11:00:50 -0700
Subject: [PATCH] vllm

---
 llama_stack/distribution/templates/remote-vllm-build.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llama_stack/distribution/templates/remote-vllm-build.yaml b/llama_stack/distribution/templates/remote-vllm-build.yaml
index 525c3a930..e907cb7c9 100644
--- a/llama_stack/distribution/templates/remote-vllm-build.yaml
+++ b/llama_stack/distribution/templates/remote-vllm-build.yaml
@@ -1,10 +1,10 @@
-name: remote-vllm
+name: local-vllm
 distribution_spec:
-  description: Use remote vLLM for running LLM inference
+  description: Like local, but use vLLM for running LLM inference
   providers:
-    inference: remote::vllm
+    inference: vllm
     memory: meta-reference
     safety: meta-reference
     agents: meta-reference
     telemetry: meta-reference
-image_type: docker
\ No newline at end of file
+image_type: conda