chore(misc): make tests and starter faster

This commit is contained in:
Ashwin Bharambe 2025-08-05 13:57:15 -07:00
parent ea46f74092
commit 2b4e88a3de
19 changed files with 2860 additions and 1660 deletions

View file

@@ -6,9 +6,6 @@
import pytest
from openai import OpenAI
from llama_stack.core.library_client import LlamaStackAsLibraryClient
from ..test_cases.test_case import TestCase
@@ -59,9 +56,6 @@ def skip_if_model_doesnt_support_suffix(client_with_models, model_id):
def skip_if_model_doesnt_support_openai_chat_completion(client_with_models, model_id):
if isinstance(client_with_models, LlamaStackAsLibraryClient):
pytest.skip("OpenAI chat completions are not supported when testing with library client yet.")
provider = provider_from_model(client_with_models, model_id)
if provider.provider_type in (
"inline::meta-reference",
@@ -90,17 +84,6 @@ def skip_if_provider_isnt_openai(client_with_models, model_id):
)
@pytest.fixture
def openai_client(client_with_models):
    """Build an OpenAI SDK client pointed at the stack's OpenAI-compatible endpoint.

    Reuses the base URL of the already-running stack client and appends the
    ``/v1/openai/v1`` prefix that the server exposes for OpenAI compatibility.
    The API key is a placeholder ("bar") — the test server does not validate it.
    """
    return OpenAI(
        base_url=f"{client_with_models.base_url}/v1/openai/v1",
        api_key="bar",
    )
@pytest.fixture(params=["openai_client", "llama_stack_client"])
def compat_client(request):
    """Parametrized fixture that yields each compatible client in turn.

    Each test using this fixture runs twice — once against the OpenAI SDK
    client and once against the llama-stack client — by resolving the
    fixture named in the current parameter.
    """
    fixture_name = request.param
    return request.getfixturevalue(fixture_name)
@pytest.mark.parametrize(
"test_case",
[