diff --git a/llama_stack/providers/inline/inference/vllm/vllm.py b/llama_stack/providers/inline/inference/vllm/vllm.py
index 984d28c89..b3f226b98 100644
--- a/llama_stack/providers/inline/inference/vllm/vllm.py
+++ b/llama_stack/providers/inline/inference/vllm/vllm.py
@@ -263,7 +263,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
             # Load from Hugging Face repo into default local cache dir
             resolved_model_id = resolved_llama_model.huggingface_repo
 
-            # Detect a geniune Meta Llama model to trigger Meta-specific preprocessing.
+            # Detect a genuine Meta Llama model to trigger Meta-specific preprocessing.
             # Don't set self.is_meta_llama_model until we actually load the model.
             is_meta_llama_model = True
         else:  # if resolved_llama_model is None