Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-03 09:53:45 +00:00
test: Updated test skips that were marked with "inline::vllm" (#3979)
These provider types should be "remote::vllm"; with the fix, the log probs tests are skipped when running against remote vLLM (they fail if run).
Signed-off-by: Derek Higgins <derekh@redhat.com>
This commit is contained in:
parent 174ef162b3
commit 19d85003de
1 changed file with 2 additions and 2 deletions
@@ -39,7 +39,7 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
     if provider.provider_type in (
         "inline::meta-reference",
         "inline::sentence-transformers",
-        "inline::vllm",
+        "remote::vllm",
         "remote::bedrock",
         "remote::databricks",
         # Technically Nvidia does support OpenAI completions, but none of their hosted models
@@ -120,7 +120,7 @@ def skip_if_model_doesnt_support_openai_chat_completion(client_with_models, model_id)
     if provider.provider_type in (
         "inline::meta-reference",
         "inline::sentence-transformers",
-        "inline::vllm",
+        "remote::vllm",
         "remote::bedrock",
         "remote::databricks",
         "remote::cerebras",
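For context, a minimal sketch of how a skip helper like the ones touched above typically works. The provider lookup via client_with_models is an assumption for illustration; only the provider_type tuple (with the corrected "remote::vllm" entry) is taken from the diff.

import pytest


def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id):
    # Assumed lookup: map the model to the provider that serves it.
    models = {m.identifier: m for m in client_with_models.models.list()}
    providers = {p.provider_id: p for p in client_with_models.providers.list()}
    provider = providers[models[model_id].provider_id]

    # Providers that do not support the OpenAI-compatible completions API.
    # Note "remote::vllm" (not "inline::vllm"), as corrected by this commit.
    if provider.provider_type in (
        "inline::meta-reference",
        "inline::sentence-transformers",
        "remote::vllm",
        "remote::bedrock",
        "remote::databricks",
    ):
        pytest.skip(f"Provider {provider.provider_type} does not support OpenAI completions.")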
|||
Loading…
Add table
Add a link
Reference in a new issue