Add vLLM inference provider for OpenAI-compatible vLLM server (#178)
This PR adds a vLLM inference provider for an OpenAI-compatible vLLM server.
commit a27a2cd2af (parent 59c43736e8)
6 changed files with 209 additions and 1 deletion
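For context, vLLM's OpenAI-compatible server exposes the standard /v1 endpoints, so any OpenAI client can talk to it. A minimal sketch of that interaction (the model name, port, and launch command are placeholders, assuming a locally running server):

# Sketch: query a locally running OpenAI-compatible vLLM server.
# Assumes the server was started with something like:
#   python -m vllm.entrypoints.openai.api_server --model meta-llama/Llama-2-7b-chat-hf
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",  # placeholder: vLLM's default port
    api_key="not-needed",  # vLLM does not require an API key by default
)
response = client.chat.completions.create(
    model="meta-llama/Llama-2-7b-chat-hf",  # placeholder model name
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)

The adapter added in this commit wraps exactly this kind of endpoint behind the Llama Stack inference interface.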
llama_stack/providers/adapters/inference/vllm/__init__.py (new file, 15 additions)
@@ -0,0 +1,15 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from .config import VLLMImplConfig
+from .vllm import VLLMInferenceAdapter
+
+
+async def get_adapter_impl(config: VLLMImplConfig, _deps):
+    assert isinstance(config, VLLMImplConfig), f"Unexpected config type: {type(config)}"
+    impl = VLLMInferenceAdapter(config)
+    await impl.initialize()
+    return impl
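Downstream, the stack resolves the provider through this get_adapter_impl entry point. A hypothetical sketch of that wiring, assuming VLLMImplConfig accepts a url field pointing at the running vLLM server (the field name is an assumption, not confirmed by this diff):

import asyncio

from llama_stack.providers.adapters.inference.vllm import (
    VLLMImplConfig,
    get_adapter_impl,
)


async def main():
    # Hypothetical: assumes VLLMImplConfig takes a `url` for the vLLM server.
    config = VLLMImplConfig(url="http://localhost:8000")
    # Second argument is the (unused) deps mapping; the adapter is returned
    # already initialized because get_adapter_impl awaits initialize().
    adapter = await get_adapter_impl(config, {})
    return adapter


asyncio.run(main())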