mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-31 16:01:46 +00:00

commit bf8d76f19b (parent cad646478f)

    fixes

3 changed files with 7 additions and 8 deletions
@@ -359,8 +359,6 @@ class OpenAIResponsesImpl:
             temperature=temperature,
         )
 
-        print(f"chat_tools: {chat_tools}")
-        print(f"messages: {messages}")
         inference_result = await self.inference_api.openai_chat_completion(
             model=model,
             messages=messages,
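The deleted lines in this hunk are two debug prints emitted just before the chat-completion call. If comparable tracing were wanted without unconditional stdout output, the standard-library logging module is the conventional alternative; a minimal, self-contained sketch with placeholder data (logger name and values are hypothetical, not from this codebase):

import logging

# Placeholder values standing in for the real chat_tools / messages objects
# built earlier in the method (illustrative only).
chat_tools = [{"type": "function", "function": {"name": "get_weather"}}]
messages = [{"role": "user", "content": "What is the weather in Tokyo?"}]

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("openai_responses")  # hypothetical logger name

# Emit at DEBUG level rather than via unconditional print(), so the output
# only appears when debug logging is explicitly enabled.
logger.debug("chat_tools: %s", chat_tools)
logger.debug("messages: %s", messages)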
@@ -10,17 +10,17 @@ from tests.verifications.openai_api.fixtures.fixtures import _load_all_verification_configs
 def pytest_generate_tests(metafunc):
     """Dynamically parametrize tests based on the selected provider and config."""
     if "model" in metafunc.fixturenames:
+        model = metafunc.config.getoption("model")
+        if model:
+            metafunc.parametrize("model", [model])
+            return
+
         provider = metafunc.config.getoption("provider")
         if not provider:
             print("Warning: --provider not specified. Skipping model parametrization.")
             metafunc.parametrize("model", [])
             return
 
-        model = metafunc.config.getoption("model")
-        if model:
-            metafunc.parametrize("model", [model])
-            return
-
         try:
             config_data = _load_all_verification_configs()
         except (OSError, FileNotFoundError) as e:
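For context on the hunk above: metafunc.config.getoption reads command-line options registered via pytest_addoption, and parametrizing with an empty list leaves the tests collected but reported as skipped. A self-contained conftest.py sketch of that pattern, with option names taken from the diff and everything else illustrative:

# conftest.py -- illustrative sketch, not the project's actual conftest
def pytest_addoption(parser):
    # Registers the CLI options that pytest_generate_tests reads below.
    parser.addoption("--model", default=None, help="Run tests against a single model id.")
    parser.addoption("--provider", default=None, help="Provider whose configured models are tested.")

def pytest_generate_tests(metafunc):
    if "model" in metafunc.fixturenames:
        # With this change, an explicit --model wins before any provider lookup,
        # so --provider is no longer required when a model is given directly.
        model = metafunc.config.getoption("model")
        if model:
            metafunc.parametrize("model", [model])
            return
        # Empty parameter set: the tests are collected but skipped.
        metafunc.parametrize("model", [])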
@@ -7,6 +7,7 @@
 import json
 
 import httpx
+import openai
 import pytest
 
 from llama_stack import LlamaStackAsLibraryClient
@@ -306,7 +307,7 @@ def test_response_non_streaming_mcp_tool(request, openai_client, model, provider
     exc_type = (
         AuthenticationRequiredError
         if isinstance(openai_client, LlamaStackAsLibraryClient)
-        else httpx.HTTPStatusError
+        else (httpx.HTTPStatusError, openai.AuthenticationError)
     )
     with pytest.raises(exc_type):
         openai_client.responses.create(
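The replacement above works because pytest.raises accepts either a single exception type or a tuple of types and passes if any one of them is raised, so exc_type can remain a single expression covering both client flavors. A standalone illustration with generic stand-in exceptions (none of these names come from the test suite):

import pytest

class AuthRequired(Exception):
    """Generic stand-in for a library-specific auth error (hypothetical)."""

def call_protected_endpoint(in_process: bool):
    # Mirrors the split in the test: an in-process client raises a library
    # exception, while a remote client surfaces an HTTP/SDK error instead.
    if in_process:
        raise AuthRequired("token missing")
    raise ConnectionError("HTTP 401")

@pytest.mark.parametrize("in_process", [True, False])
def test_auth_error_is_raised(in_process):
    # pytest.raises accepts a single exception type or a tuple of types;
    # the block passes if any of the listed exceptions is raised.
    exc_type = AuthRequired if in_process else (ConnectionError, PermissionError)
    with pytest.raises(exc_type):
        call_protected_endpoint(in_process)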