Add vllm to the inference registry

Signed-off-by: Russell Bryant <rbryant@redhat.com>
Author: Russell Bryant <rbryant@redhat.com>
Date: 2024-09-28 19:06:53 +00:00
parent a08fd8f331
commit 31a0c51dea

@@ -104,4 +104,13 @@ def available_providers() -> List[ProviderSpec]:
             config_class="llama_stack.providers.adapters.inference.bedrock.BedrockConfig",
         ),
     ),
+    InlineProviderSpec(
+        api=Api.inference,
+        provider_id="vllm",
+        pip_packages=[
+            "vllm",
+        ],
+        module="llama_stack.providers.impls.vllm",
+        config_class="llama_stack.providers.impls.vllm.VLLMConfig",
+    ),
 ]
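
For context, the new registry entry points config_class at llama_stack.providers.impls.vllm.VLLMConfig, which is not part of this diff. A minimal sketch of what such a config class could look like, assuming the project's pydantic-based config conventions; the field names and defaults here are illustrative assumptions, not taken from this commit:

    from pydantic import BaseModel, Field

    # Hypothetical sketch of the VLLMConfig referenced by config_class above.
    # The actual class lives in llama_stack.providers.impls.vllm and is not
    # shown in this diff; fields below are assumptions for illustration.
    class VLLMConfig(BaseModel):
        model: str = Field(
            default="Llama3.1-8B-Instruct",
            description="Model to load with vLLM",
        )
        tensor_parallel_size: int = Field(
            default=1,
            description="Number of GPUs to shard the model across",
        )

Because this is an InlineProviderSpec rather than a remote adapter, the listed pip_packages (vllm) are installed into the stack's own environment and the provider runs in-process, rather than proxying to an external inference server.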