Enable remote::vllm

Author: Ashwin Bharambe 2024-11-06 14:11:31 -08:00
parent 6ebd553da5
commit 6deeee9b87
5 changed files with 70 additions and 24 deletions


@@ -61,15 +61,15 @@ def available_providers() -> List[ProviderSpec]:
             module="llama_stack.providers.adapters.inference.ollama",
         ),
     ),
-        # remote_provider_spec(
-        #     api=Api.inference,
-        #     adapter=AdapterSpec(
-        #         adapter_type="vllm",
-        #         pip_packages=["openai"],
-        #         module="llama_stack.providers.adapters.inference.vllm",
-        #         config_class="llama_stack.providers.adapters.inference.vllm.VLLMImplConfig",
-        #     ),
-        # ),
+        remote_provider_spec(
+            api=Api.inference,
+            adapter=AdapterSpec(
+                adapter_type="vllm",
+                pip_packages=["openai"],
+                module="llama_stack.providers.adapters.inference.vllm",
+                config_class="llama_stack.providers.adapters.inference.vllm.VLLMInferenceAdapterConfig",
+            ),
+        ),
         remote_provider_spec(
             api=Api.inference,
             adapter=AdapterSpec(
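
The newly enabled adapter lists `openai` in pip_packages because a remote vLLM server exposes an OpenAI-compatible HTTP API. As a rough sketch of what the adapter ultimately talks to (not part of this commit; the URL, api_key value, and model name below are placeholder assumptions), the same endpoint can be exercised directly with the `openai` client:

```python
# Rough sketch, not part of this commit: a remote vLLM server speaks the
# OpenAI-compatible API, which is why the adapter depends on the `openai`
# package. URL, api_key, and model name are placeholder assumptions.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",  # wherever the vLLM server is listening
    api_key="not-needed",                 # vLLM typically ignores the key unless auth is configured
)

response = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # placeholder; must match the model vLLM is serving
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)
```

The renamed config_class (`VLLMInferenceAdapterConfig`) presumably carries the equivalent connection settings for the adapter; its exact fields are not shown in this diff.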