From 19d85003deb756f4972cc3d8319c5801caa3fd7f Mon Sep 17 00:00:00 2001
From: Derek Higgins
Date: Thu, 30 Oct 2025 13:48:21 +0000
Subject: [PATCH] test: Updated test skips that were marked with "inline::vllm" (#3979)

This should be "remote::vllm". This causes some log probs tests to be
skipped with remote vllm. (They fail if run).

Signed-off-by: Derek Higgins
---
 tests/integration/inference/test_openai_completion.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/integration/inference/test_openai_completion.py b/tests/integration/inference/test_openai_completion.py
index 65f773889..964d19c1d 100644
--- a/tests/integration/inference/test_openai_completion.py
+++ b/tests/integration/inference/test_openai_completion.py
@@ -39,7 +39,7 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
     if provider.provider_type in (
         "inline::meta-reference",
         "inline::sentence-transformers",
-        "inline::vllm",
+        "remote::vllm",
         "remote::bedrock",
         "remote::databricks",
         # Technically Nvidia does support OpenAI completions, but none of their hosted models
@@ -120,7 +120,7 @@ def skip_if_model_doesnt_support_openai_chat_completion(client_with_models, mode
     if provider.provider_type in (
         "inline::meta-reference",
         "inline::sentence-transformers",
-        "inline::vllm",
+        "remote::vllm",
         "remote::bedrock",
         "remote::databricks",
         "remote::cerebras",
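
For context, below is a minimal sketch of how a provider-type skip helper like
the one patched above typically works. Only the provider_type tuple check is
visible in the hunks; the model/provider lookup calls
(client_with_models.models.list(), client_with_models.providers.list()) are
assumptions for illustration, not the exact upstream implementation.

    import pytest


    def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id):
        # Resolve which provider serves this model (lookup logic assumed for
        # illustration; the patch only shows the provider_type check below).
        models = {m.identifier: m for m in client_with_models.models.list()}
        provider_id = models[model_id].provider_id
        providers = {p.provider_id: p for p in client_with_models.providers.list()}
        provider = providers[provider_id]
        if provider.provider_type in (
            "inline::meta-reference",
            "inline::sentence-transformers",
            "remote::vllm",  # was mistakenly listed as "inline::vllm" before this patch
            "remote::bedrock",
            "remote::databricks",
        ):
            pytest.skip(f"Provider {provider.provider_type} doesn't support OpenAI completions.")

Because the old entry named a provider type ("inline::vllm") that never matches
a remote vLLM deployment, the skip was silently ineffective and the failing
log-probs tests ran anyway; correcting the string to "remote::vllm" makes the
skip take effect.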