From 82c10c917faadb761cf7167c30d4935133dc66e7 Mon Sep 17 00:00:00 2001
From: Fred Reiss
Date: Thu, 19 Dec 2024 15:06:47 -0800
Subject: [PATCH] Minor change to force rerun of automatic jobs

---
 llama_stack/providers/inline/inference/vllm/vllm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/providers/inline/inference/vllm/vllm.py b/llama_stack/providers/inline/inference/vllm/vllm.py
index 1caae9687..2672c3dbb 100644
--- a/llama_stack/providers/inline/inference/vllm/vllm.py
+++ b/llama_stack/providers/inline/inference/vllm/vllm.py
@@ -78,7 +78,7 @@ class VLLMInferenceImpl(Inference, ModelsProtocolPrivate):
         self.engine = AsyncLLMEngine.from_engine_args(engine_args)
 
     async def shutdown(self):
-        """Shutdown the vLLM inference adapter."""
+        """Shut down the vLLM inference adapter."""
         log.info("Shutting down vLLM inference provider.")
         if self.engine:
             self.engine.shutdown_background_loop()
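
For context, below is a minimal, self-contained sketch of the startup/shutdown lifecycle this hunk touches. It assumes vLLM's public AsyncLLMEngine API (AsyncEngineArgs plus from_engine_args() to start the engine, shutdown_background_loop() to stop its request-processing loop); the MiniVLLMProvider class and the model name are illustrative stand-ins, not the actual Llama Stack implementation.

import asyncio
import logging
from typing import Optional

from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.engine.async_llm_engine import AsyncLLMEngine

log = logging.getLogger(__name__)


class MiniVLLMProvider:
    """Illustrative stand-in for the patched VLLMInferenceImpl (hypothetical class)."""

    def __init__(self, model: str):
        self.model = model
        self.engine: Optional[AsyncLLMEngine] = None

    async def initialize(self) -> None:
        # Build the async engine, as in the context line above the hunk.
        engine_args = AsyncEngineArgs(model=self.model)
        self.engine = AsyncLLMEngine.from_engine_args(engine_args)

    async def shutdown(self) -> None:
        """Shut down the vLLM engine, mirroring the patched method."""
        log.info("Shutting down vLLM inference provider.")
        if self.engine:
            # Stop the engine's background request-processing loop.
            self.engine.shutdown_background_loop()


async def main() -> None:
    provider = MiniVLLMProvider(model="facebook/opt-125m")  # hypothetical small model
    await provider.initialize()
    try:
        pass  # serve inference requests here
    finally:
        await provider.shutdown()


if __name__ == "__main__":
    asyncio.run(main())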