From b981b49bfa08697603092e5a19320d6e02d6c81f Mon Sep 17 00:00:00 2001
From: Yuan Tang
Date: Sat, 8 Feb 2025 23:42:57 -0500
Subject: [PATCH] test: Use JSON tool prompt format for remote::vllm provider (#1019)

# What does this PR do?

This PR removes the warnings emitted when running tests for the `remote::vllm` provider:

```
Detected the chat template content format to be 'openai'. You can set `--chat-template-content-format` to override this.
```

## Test Plan

All tests passed without the warning messages shown above.

Signed-off-by: Yuan Tang
---
 tests/client-sdk/inference/test_text_inference.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/client-sdk/inference/test_text_inference.py b/tests/client-sdk/inference/test_text_inference.py
index aa0e510dd..81b476218 100644
--- a/tests/client-sdk/inference/test_text_inference.py
+++ b/tests/client-sdk/inference/test_text_inference.py
@@ -11,6 +11,7 @@ PROVIDER_TOOL_PROMPT_FORMAT = {
     "remote::ollama": "json",
     "remote::together": "json",
     "remote::fireworks": "json",
+    "remote::vllm": "json",
 }
 
 PROVIDER_LOGPROBS_TOP_K = set(
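
For context, here is a minimal sketch of how a provider-to-format mapping like `PROVIDER_TOOL_PROMPT_FORMAT` can be consumed in a test suite. The `get_tool_prompt_format` helper below is a hypothetical illustration, not the actual code in `test_text_inference.py`:

```python
# Sketch only: the helper below is a hypothetical illustration of how the
# mapping touched by this patch might be looked up; it is not the real test code.
PROVIDER_TOOL_PROMPT_FORMAT = {
    "remote::ollama": "json",
    "remote::together": "json",
    "remote::fireworks": "json",
    "remote::vllm": "json",  # entry added by this patch
}


def get_tool_prompt_format(provider_type: str) -> str | None:
    # Providers without an explicit entry return None, leaving the tool
    # prompt format to the provider's default behavior -- which is what
    # produced the chat-template warning for remote::vllm before this patch.
    return PROVIDER_TOOL_PROMPT_FORMAT.get(provider_type)


# Example: get_tool_prompt_format("remote::vllm") -> "json"
```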