From 258d4c0b0f3bd9c0faa311508ea9a20f87ca1633 Mon Sep 17 00:00:00 2001
From: Fred Reiss
Date: Sat, 15 Feb 2025 17:13:10 -0800
Subject: [PATCH] Update llama_stack/providers/inline/inference/vllm/vllm.py
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Sébastien Han
---
 llama_stack/providers/inline/inference/vllm/vllm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/inline/inference/vllm/vllm.py b/llama_stack/providers/inline/inference/vllm/vllm.py
index 984d28c89..b3f226b98 100644
--- a/llama_stack/providers/inline/inference/vllm/vllm.py
+++ b/llama_stack/providers/inline/inference/vllm/vllm.py
@@ -263,7 +263,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
             # Load from Hugging Face repo into default local cache dir
             resolved_model_id = resolved_llama_model.huggingface_repo
 
-            # Detect a geniune Meta Llama model to trigger Meta-specific preprocessing.
+            # Detect a genuine Meta Llama model to trigger Meta-specific preprocessing.
             # Don't set self.is_meta_llama_model until we actually load the model.
             is_meta_llama_model = True
         else:  # if resolved_llama_model is None
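
The comment being patched describes a deferred-flag pattern: detection happens up front, but self.is_meta_llama_model is only set once the model actually loads. Below is a minimal standalone sketch of that pattern, not the actual llama_stack implementation; the class name LoaderSketch, the load_model method, and the fallback model id are hypothetical stand-ins.

    from types import SimpleNamespace


    class LoaderSketch:
        """Hypothetical illustration of the deferred-flag pattern."""

        def __init__(self) -> None:
            # Stays False until a model has really been loaded.
            self.is_meta_llama_model = False

        def load_model(self, resolved_llama_model) -> str:
            if resolved_llama_model is not None:
                # Load from Hugging Face repo into default local cache dir.
                resolved_model_id = resolved_llama_model.huggingface_repo
                # Detected a genuine Meta Llama model, but don't touch
                # self.is_meta_llama_model yet: the load below could still fail.
                is_meta_llama_model = True
            else:  # if resolved_llama_model is None
                resolved_model_id = "local/unknown-model"  # hypothetical fallback
                is_meta_llama_model = False

            # ... the actual (and possibly failing) model load would go here ...

            # Only after a successful load is it safe to publish the detection.
            self.is_meta_llama_model = is_meta_llama_model
            return resolved_model_id


    loader = LoaderSketch()
    model = SimpleNamespace(huggingface_repo="meta-llama/Llama-3.1-8B-Instruct")
    print(loader.load_model(model), loader.is_meta_llama_model)

Keeping the local variable separate from the instance attribute means a failed load leaves the object in a consistent state, which is the rationale the patched comment records.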