commit cdadf0f87d (parent ad4e65e876)
Author: Yuan Tang
Date: 2024-10-10 20:37:20 -04:00
3 changed files with 71 additions and 217 deletions


@@ -0,0 +1,10 @@
+name: remote-vllm
+distribution_spec:
+  description: Use remote vLLM for running LLM inference
+  providers:
+    inference: remote::vllm
+    memory: meta-reference
+    safety: meta-reference
+    agents: meta-reference
+    telemetry: meta-reference
+image_type: docker
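
Note: this build spec only selects the `remote::vllm` provider; the endpoint details live in the run-time provider config, and the only fields the adapter below reads are `url` and `api_token`. A minimal, hypothetical run-config fragment (the surrounding schema is assumed here, it is not part of this commit) could look like:

    inference:
      - provider_type: remote::vllm
        config:
          url: http://localhost:8000/v1   # OpenAI-compatible endpoint served by vLLM
          api_token: dummy                # vLLM only enforces a token when started with --api-key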


@@ -9,14 +9,9 @@ from .vllm import VLLMInferenceAdapter


 async def get_adapter_impl(config: VLLMImplConfig, _deps):
-    assert isinstance(config, VLLMImplConfig), f"Unexpected config type: {type(config)}"
-
-    if config.url is not None:
-        impl = VLLMInferenceAdapter(config)
-    else:
-        raise ValueError(
-            "Invalid configuration. Specify either an URL or HF Inference Endpoint details (namespace and endpoint name)."
-        )
-
+    assert isinstance(
+        config, VLLMImplConfig
+    ), f"Unexpected config type: {type(config)}"
+    impl = VLLMInferenceAdapter(config)
     await impl.initialize()
     return impl
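
The simplified factory no longer validates `config.url`; a missing URL now only surfaces when the OpenAI client is constructed inside the adapter. A rough sketch of driving it directly, assuming the usual adapter package location and a pydantic-style `VLLMImplConfig(url=..., api_token=...)` constructor (neither is shown in this commit):

    import asyncio

    # Assumed module path; only the package-relative imports are visible in this diff.
    from llama_stack.providers.adapters.inference.vllm import get_adapter_impl
    from llama_stack.providers.adapters.inference.vllm.config import VLLMImplConfig


    async def main() -> None:
        # url/api_token are the two config fields the adapter reads in vllm.py.
        config = VLLMImplConfig(url="http://localhost:8000/v1", api_token="dummy")
        adapter = await get_adapter_impl(config, None)  # _deps is unused by this factory
        # chat_completion / shutdown would be exercised on `adapter` from here.


    asyncio.run(main())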


@@ -3,42 +3,44 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

 from typing import AsyncGenerator

 from llama_models.llama3.api.chat_format import ChatFormat
-from llama_models.llama3.api.datatypes import Message, StopReason
+from llama_models.llama3.api.datatypes import Message
 from llama_models.llama3.api.tokenizer import Tokenizer
-from llama_models.sku_list import resolve_model

 from openai import OpenAI

 from llama_stack.apis.inference import *  # noqa: F403
-from llama_stack.providers.utils.inference.augment_messages import augment_messages_for_tools
+from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
+from llama_stack.providers.utils.inference.openai_compat import (
+    get_sampling_options,
+    process_chat_completion_response,
+    process_chat_completion_stream_response,
+)
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    chat_completion_request_to_prompt,
+)

 from .config import VLLMImplConfig

 # Reference: https://docs.vllm.ai/en/latest/models/supported_models.html
 VLLM_SUPPORTED_MODELS = {
-    "Llama3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
-    # "Llama3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
-    # "Llama3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
+    "Llama3.1-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
+    "Llama3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
 }


-class VLLMInferenceAdapter(Inference):
+class VLLMInferenceAdapter(ModelRegistryHelper, Inference):
     def __init__(self, config: VLLMImplConfig) -> None:
-        self.config = config
-        tokenizer = Tokenizer.get_instance()
-        self.formatter = ChatFormat(tokenizer)
-
-    @property
-    def client(self) -> OpenAI:
-        return OpenAI(
-            api_key=self.config.api_token,
-            base_url=self.config.url
-        )
+        ModelRegistryHelper.__init__(
+            self, stack_to_provider_models_map=VLLM_SUPPORTED_MODELS
+        )
+        self.config = config
+        self.formatter = ChatFormat(Tokenizer.get_instance())

     async def initialize(self) -> None:
         return
@@ -46,41 +48,10 @@ class VLLMInferenceAdapter(Inference):
     async def shutdown(self) -> None:
         pass

-    async def completion(self, request: CompletionRequest) -> AsyncGenerator:
+    def completion(self, request: CompletionRequest) -> AsyncGenerator:
         raise NotImplementedError()

-    def _messages_to_vllm_messages(self, messages: list[Message]) -> list:
-        vllm_messages = []
-        for message in messages:
-            if message.role == "ipython":
-                role = "tool"
-            else:
-                role = message.role
-            vllm_messages.append({"role": role, "content": message.content})
-
-        return vllm_messages
-
-    def resolve_vllm_model(self, model_name: str) -> str:
-        model = resolve_model(model_name)
-        assert (
-            model is not None
-            and model.descriptor(shorten_default_variant=True)
-            in VLLM_SUPPORTED_MODELS
-        ), f"Unsupported model: {model_name}, use one of the supported models: {','.join(VLLM_SUPPORTED_MODELS.keys())}"
-
-        return VLLM_SUPPORTED_MODELS.get(
-            model.descriptor(shorten_default_variant=True)
-        )
-
-    def get_vllm_chat_options(self, request: ChatCompletionRequest) -> dict:
-        options = {}
-        if request.sampling_params is not None:
-            for attr in {"temperature", "top_p", "top_k", "max_tokens"}:
-                if getattr(request.sampling_params, attr):
-                    options[attr] = getattr(request.sampling_params, attr)
-        return options
-
-    async def chat_completion(
+    def chat_completion(
         self,
         model: str,
         messages: List[Message],
@@ -91,7 +62,6 @@ class VLLMInferenceAdapter(Inference):
         stream: Optional[bool] = False,
         logprobs: Optional[LogProbConfig] = None,
     ) -> AsyncGenerator:
-        # wrapper request to make it easier to pass around (internal only, not exposed to API)
         request = ChatCompletionRequest(
             model=model,
             messages=messages,
@@ -103,167 +73,46 @@ class VLLMInferenceAdapter(Inference):
             logprobs=logprobs,
         )
-
-        # accumulate sampling params and other options to pass to vLLM
-        options = self.get_vllm_chat_options(request)
-        vllm_model = self.resolve_vllm_model(request.model)
-        messages = augment_messages_for_tools(request)
-        model_input = self.formatter.encode_dialog_prompt(messages)
-
-        input_tokens = len(model_input.tokens)
-        max_new_tokens = min(
-            request.sampling_params.max_tokens or (self.max_tokens - input_tokens),
-            self.max_tokens - input_tokens - 1,
-        )
-
-        print(f"Calculated max_new_tokens: {max_new_tokens}")
-
-        assert (
-            request.model == self.model_name
-        ), f"Model mismatch, expected {self.model_name}, got {request.model}"
-
-        if not request.stream:
-            r = self.client.chat.completions.create(
-                model=vllm_model,
-                messages=self._messages_to_vllm_messages(messages),
-                max_tokens=max_new_tokens,
-                stream=False,
-                **options,
-            )
-
-            stop_reason = None
-            if r.choices[0].finish_reason:
-                if (
-                    r.choices[0].finish_reason == "stop"
-                    or r.choices[0].finish_reason == "eos"
-                ):
-                    stop_reason = StopReason.end_of_turn
-                elif r.choices[0].finish_reason == "length":
-                    stop_reason = StopReason.out_of_tokens
-
-            completion_message = self.formatter.decode_assistant_message_from_content(
-                r.choices[0].message.content, stop_reason
-            )
-            yield ChatCompletionResponse(
-                completion_message=completion_message,
-                logprobs=None,
-            )
+        client = OpenAI(base_url=self.config.url, api_key=self.config.api_token)
+        if stream:
+            return self._stream_chat_completion(request, client)
         else:
-            yield ChatCompletionResponseStreamChunk(
-                event=ChatCompletionResponseEvent(
-                    event_type=ChatCompletionResponseEventType.start,
-                    delta="",
-                )
-            )
-
-            buffer = ""
-            ipython = False
-            stop_reason = None
-
-            for chunk in self.client.chat.completions.create(
-                model=vllm_model,
-                messages=self._messages_to_vllm_messages(messages),
-                max_tokens=max_new_tokens,
-                stream=True,
-                **options,
-            ):
-                if chunk.choices[0].finish_reason:
-                    if (
-                        stop_reason is None and chunk.choices[0].finish_reason == "stop"
-                    ) or (
-                        stop_reason is None and chunk.choices[0].finish_reason == "eos"
-                    ):
-                        stop_reason = StopReason.end_of_turn
-                    elif (
-                        stop_reason is None
-                        and chunk.choices[0].finish_reason == "length"
-                    ):
-                        stop_reason = StopReason.out_of_tokens
-                    break
-
-                text = chunk.choices[0].message.content
-                if text is None:
-                    continue
-
-                # check if it's a tool call ( aka starts with <|python_tag|> )
-                if not ipython and text.startswith("<|python_tag|>"):
-                    ipython = True
-                    yield ChatCompletionResponseStreamChunk(
-                        event=ChatCompletionResponseEvent(
-                            event_type=ChatCompletionResponseEventType.progress,
-                            delta=ToolCallDelta(
-                                content="",
-                                parse_status=ToolCallParseStatus.started,
-                            ),
-                        )
-                    )
-                    buffer += text
-                    continue
-
-                if ipython:
-                    if text == "<|eot_id|>":
-                        stop_reason = StopReason.end_of_turn
-                        text = ""
-                        continue
-                    elif text == "<|eom_id|>":
-                        stop_reason = StopReason.end_of_message
-                        text = ""
-                        continue
-
-                    buffer += text
-                    delta = ToolCallDelta(
-                        content=text,
-                        parse_status=ToolCallParseStatus.in_progress,
-                    )
-
-                    yield ChatCompletionResponseStreamChunk(
-                        event=ChatCompletionResponseEvent(
-                            event_type=ChatCompletionResponseEventType.progress,
-                            delta=delta,
-                            stop_reason=stop_reason,
-                        )
-                    )
-                else:
-                    buffer += text
-                    yield ChatCompletionResponseStreamChunk(
-                        event=ChatCompletionResponseEvent(
-                            event_type=ChatCompletionResponseEventType.progress,
-                            delta=text,
-                            stop_reason=stop_reason,
-                        )
-                    )
-
-            # parse tool calls and report errors
-            message = self.formatter.decode_assistant_message_from_content(
-                buffer, stop_reason
-            )
-            parsed_tool_calls = len(message.tool_calls) > 0
-            if ipython and not parsed_tool_calls:
-                yield ChatCompletionResponseStreamChunk(
-                    event=ChatCompletionResponseEvent(
-                        event_type=ChatCompletionResponseEventType.progress,
-                        delta=ToolCallDelta(
-                            content="",
-                            parse_status=ToolCallParseStatus.failure,
-                        ),
-                        stop_reason=stop_reason,
-                    )
-                )
-
-            for tool_call in message.tool_calls:
-                yield ChatCompletionResponseStreamChunk(
-                    event=ChatCompletionResponseEvent(
-                        event_type=ChatCompletionResponseEventType.progress,
-                        delta=ToolCallDelta(
-                            content=tool_call,
-                            parse_status=ToolCallParseStatus.success,
-                        ),
-                        stop_reason=stop_reason,
-                    )
-                )
-
-            yield ChatCompletionResponseStreamChunk(
-                event=ChatCompletionResponseEvent(
-                    event_type=ChatCompletionResponseEventType.complete,
-                    delta="",
-                    stop_reason=stop_reason,
-                )
-            )
+            return self._nonstream_chat_completion(request, client)
+
+    async def _nonstream_chat_completion(
+        self, request: ChatCompletionRequest, client: OpenAI
+    ) -> ChatCompletionResponse:
+        params = self._get_params(request)
+        r = client.completions.create(**params)
+        return process_chat_completion_response(request, r, self.formatter)
+
+    async def _stream_chat_completion(
+        self, request: ChatCompletionRequest, client: OpenAI
+    ) -> AsyncGenerator:
+        params = self._get_params(request)
+
+        async def _to_async_generator():
+            s = client.completions.create(**params)
+            for chunk in s:
+                yield chunk
+
+        stream = _to_async_generator()
+        async for chunk in process_chat_completion_stream_response(
+            request, stream, self.formatter
+        ):
+            yield chunk
+
+    def _get_params(self, request: ChatCompletionRequest) -> dict:
+        return {
+            "model": self.map_to_provider_model(request.model),
+            "prompt": chat_completion_request_to_prompt(request, self.formatter),
+            "stream": request.stream,
+            **get_sampling_options(request),
+        }
+
+    async def embeddings(
+        self,
+        model: str,
+        contents: List[InterleavedTextMedia],
+    ) -> EmbeddingsResponse:
+        raise NotImplementedError()
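
Net effect of the rewrite: instead of translating messages and calling the chat.completions endpoint with hand-rolled token accounting and tool-call parsing, the adapter now renders the chat request to a single Llama prompt and sends it to the OpenAI-compatible completions endpoint, leaving response and stream decoding to the shared openai_compat helpers. A standalone sketch of the request shape `_get_params` produces (the model string and prompt below are placeholders, not values from this commit):

    from openai import OpenAI

    # Illustration only: mirrors the dict built by _get_params.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")

    params = {
        "model": "meta-llama/Meta-Llama-3.1-405B-Instruct",  # map_to_provider_model(...) result
        "prompt": "<|begin_of_text|>...",                     # chat_completion_request_to_prompt(...) result
        "stream": False,
        # get_sampling_options(request) adds e.g. temperature/top_p/top_k/max_tokens here.
    }
    r = client.completions.create(**params)
    print(r.choices[0].text)

Keeping the prompt rendering on the llama-stack side (via `ChatFormat`) keeps special tokens and tool-call syntax consistent with the other providers rather than depending on vLLM's chat template.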