Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-31 04:30:02 +00:00)
Do not send an empty 'tools' param to remote vllm
Fixes: #1955

Since 0.2.0, vLLM receives an empty list (vs. ``None`` in 0.1.9 and earlier) when no tools are configured, which causes the issue described in #1955. This patch avoids sending the 'tools' param to vLLM altogether rather than sending an empty list. It also adds a small unit test to guard against regressions.

Signed-off-by: Daniel Alvarez <dalvarez@redhat.com>
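For context, a short plain-Python illustration (not the adapter's actual code) of why the guard changes from an ``is not None`` check to a truthiness check:

# Since 0.2.0 clients send an empty list rather than None when no tools are
# configured; the old check still attached the 'tools' param in that case.
tools_from_client = []                   # 0.2.0 behaviour (was None in 0.1.9 and earlier)
print(tools_from_client is not None)     # True  -> old guard would still send 'tools'
print(bool(tools_from_client))           # False -> new guard omits 'tools' entirely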
This commit is contained in:
parent 83b5523e2d
commit 538d601472
2 changed files with 19 additions and 2 deletions
@@ -374,7 +374,8 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         options["max_tokens"] = self.config.max_tokens
 
         input_dict: dict[str, Any] = {}
-        if isinstance(request, ChatCompletionRequest) and request.tools is not None:
+        # Only include the 'tools' param if there is any. It can break things if an empty list is sent to the vLLM.
+        if isinstance(request, ChatCompletionRequest) and request.tools:
             input_dict = {"tools": _convert_to_vllm_tools_in_request(request.tools)}
 
         if isinstance(request, ChatCompletionRequest):
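The commit message mentions a small unit test to guard against regressions; below is a minimal, self-contained sketch of what such a test could look like. The build_input_dict helper and FakeChatCompletionRequest dataclass are illustrative assumptions standing in for the adapter's request-building code and ChatCompletionRequest; they are not the identifiers used in the repository's actual test suite.

# Hedged sketch of a regression test for the guard change above.
# build_input_dict and FakeChatCompletionRequest are illustrative stand-ins,
# not the names used in llama-stack's real tests.
from dataclasses import dataclass
from typing import Any


@dataclass
class FakeChatCompletionRequest:
    tools: list[Any] | None = None


def build_input_dict(request: FakeChatCompletionRequest) -> dict[str, Any]:
    # Mirrors the patched logic: include 'tools' only when the list is non-empty.
    input_dict: dict[str, Any] = {}
    if request.tools:
        input_dict["tools"] = request.tools
    return input_dict


def test_empty_tools_param_is_omitted() -> None:
    # Clients send tools=[] since 0.2.0 and tools=None in 0.1.9 and earlier;
    # neither should add a 'tools' key to the payload sent to vLLM.
    assert "tools" not in build_input_dict(FakeChatCompletionRequest(tools=[]))
    assert "tools" not in build_input_dict(FakeChatCompletionRequest(tools=None))


def test_tools_param_is_sent_when_present() -> None:
    tools = [{"type": "function", "function": {"name": "get_weather"}}]
    assert build_input_dict(FakeChatCompletionRequest(tools=tools))["tools"] == tools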