Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2025-07-02 20:40:36 +00:00
fix: vllm starter name
Signed-off-by: Sébastien Han <seb@redhat.com>
parent ed69c1b3cc
commit af0d6014c1
4 changed files with 6 additions and 4 deletions
@@ -108,7 +108,7 @@ def get_inference_providers() -> tuple[list[Provider], dict[str, list[ProviderMo
             SambaNovaImplConfig.sample_run_config(api_key="${env.SAMBANOVA_API_KEY:}"),
         ),
         (
-            "remote-vllm",
+            "vllm",
             [],
             VLLMInferenceAdapterConfig.sample_run_config(
                 url="${env.VLLM_URL:http://localhost:8000/v1}",
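
The default in the url field uses the ${env.VAR:default} placeholder syntax that also appears in the SambaNova line of this hunk: the value is taken from the named environment variable, with the text after the colon as the fallback. Below is a minimal sketch of how such a placeholder can be resolved; the resolve_env_placeholder helper is hypothetical and only illustrates the substitution pattern, not llama-stack's actual config loader.

import os
import re

# Matches "${env.NAME:default}" -- the placeholder form used for
# VLLM_URL and SAMBANOVA_API_KEY in the hunk above.
_PLACEHOLDER = re.compile(r"\$\{env\.(?P<name>[A-Za-z0-9_]+):(?P<default>[^}]*)\}")

def resolve_env_placeholder(value: str) -> str:
    # Substitute each placeholder with the environment variable if it is
    # set, otherwise with the default that follows the colon.
    def _sub(match: re.Match) -> str:
        return os.environ.get(match.group("name"), match.group("default"))
    return _PLACEHOLDER.sub(_sub, value)

# With VLLM_URL unset this prints "http://localhost:8000/v1";
# with VLLM_URL exported it prints that value instead.
print(resolve_env_placeholder("${env.VLLM_URL:http://localhost:8000/v1}"))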