Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-06 02:32:40 +00:00
Mark xfail for other providers
Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
parent 9032544b2e
commit 596afcb527

1 changed file with 4 additions and 1 deletion
@@ -105,8 +105,11 @@ def test_text_completion_streaming(client_with_models, text_model_id, test_case)
         "inference:completion:stop_sequence",
     ],
 )
-def test_text_completion_stop_sequence(client_with_models, text_model_id, test_case):
+def test_text_completion_stop_sequence(client_with_models, text_model_id, inference_provider_type, test_case):
     skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
+    # This is only supported/tested for remote vLLM: https://github.com/meta-llama/llama-stack/issues/1771
+    if inference_provider_type != "remote::vllm":
+        pytest.xfail(f"{inference_provider_type} doesn't support 'stop' parameter yet")
     tc = TestCase(test_case)

     response = client_with_models.inference.completion(
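For context, a minimal self-contained sketch of the pattern this commit applies: calling pytest.xfail() inside the test body so providers that do not yet support the 'stop' parameter are reported as expected failures rather than hard failures. The fixture and provider names other than "remote::vllm" are hypothetical; the real test relies on the fixtures and helpers (client_with_models, text_model_id, TestCase, skip_if_model_doesnt_support_completion) defined in the llama-stack integration test suite.

import pytest


# Hypothetical stand-in for the suite's inference_provider_type fixture.
@pytest.fixture(params=["remote::vllm", "remote::other-provider"])
def inference_provider_type(request):
    return request.param


def test_stop_sequence_support(inference_provider_type):
    # Only remote vLLM is known to honor 'stop'; mark every other provider
    # as an expected failure so the suite stays green while still recording
    # the gap.
    if inference_provider_type != "remote::vllm":
        pytest.xfail(f"{inference_provider_type} doesn't support 'stop' parameter yet")
    # The real test would call the completion API with a stop sequence here.
    assert inference_provider_type == "remote::vllm"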