mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-03 09:21:45 +00:00
Update llama_stack/providers/remote/inference/vllm/vllm.py
Co-authored-by: Sébastien Han <seb@redhat.com>
This commit is contained in:
parent
6a0ee7180b
commit
6f77ca1755
1 changed file with 1 addition and 1 deletion
|
@@ -248,7 +248,7 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
|
|||
if self.client is not None:
|
||||
return
|
||||
|
||||
log.info(f"Initializing VLLM client with base_url={self.config.url}")
|
||||
log.info(f"Initializing vLLM client with base_url={self.config.url}")
|
||||
self.client = self._create_client()
|
||||
|
||||
def _create_client(self):
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue