Mirror of https://github.com/meta-llama/llama-stack.git
Stub in an initial OpenAI Responses API
Signed-off-by: Ben Browning <bbrownin@redhat.com>
parent c149cf2e0f
commit 70c088af3a
18 changed files with 441 additions and 0 deletions
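
The stubbed endpoints are OpenAI-compatible, so they can be driven with the stock openai Python client pointed at the stack's /v1/openai/v1 path, exactly as the new integration tests below do. A minimal sketch, assuming a running Llama Stack server at http://localhost:8321 and a placeholder model id (neither is part of this commit; the dummy api_key and the create/retrieve/previous_response_id calls mirror the tests):

from openai import OpenAI

# Assumed: a Llama Stack server reachable at this URL with a registered text model.
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="bar")

# Create a response, then read it back by id.
response = client.responses.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # placeholder model id
    input="Which planet do humans live on?",
    stream=False,
)
print(response.output_text)
retrieved = client.responses.retrieve(response_id=response.id)
assert retrieved.output_text == response.output_text

# Chain a follow-up turn onto the stored response.
follow_up = client.responses.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    input="Repeat your previous response in all caps.",
    previous_response_id=response.id,
)
print(follow_up.output_text)
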
tests/integration/openai_responses/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
tests/integration/openai_responses/test_openai_responses.py (new file, 90 lines)
@@ -0,0 +1,90 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


import pytest
from openai import OpenAI

from ..test_cases.test_case import TestCase


@pytest.fixture
def openai_client(client_with_models):
    base_url = f"{client_with_models.base_url}/v1/openai/v1"
    return OpenAI(base_url=base_url, api_key="bar")


@pytest.mark.parametrize(
    "test_case",
    [
        "openai:responses:non_streaming_01",
        "openai:responses:non_streaming_02",
    ],
)
def test_openai_responses_non_streaming(openai_client, client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)
    question = tc["question"]
    expected = tc["expected"]

    response = openai_client.responses.create(
        model=text_model_id,
        input=question,
        stream=False,
    )
    output_text = response.output_text.lower().strip()
    assert len(output_text) > 0
    assert expected.lower() in output_text

    retrieved_response = openai_client.responses.retrieve(response_id=response.id)
    assert retrieved_response.output_text == response.output_text

    next_response = openai_client.responses.create(
        model=text_model_id, input="Repeat your previous response in all caps.", previous_response_id=response.id
    )
    next_output_text = next_response.output_text.strip()
    assert expected.upper() in next_output_text


@pytest.mark.parametrize(
    "test_case",
    [
        "openai:responses:streaming_01",
        "openai:responses:streaming_02",
    ],
)
def test_openai_responses_streaming(openai_client, client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)
    question = tc["question"]
    expected = tc["expected"]

    response = openai_client.responses.create(
        model=text_model_id,
        input=question,
        stream=True,
        timeout=120,  # Increase timeout to 2 minutes for large conversation history
    )
    streamed_content = []
    response_id = ""
    for chunk in response:
        response_id = chunk.response.id
        streamed_content.append(chunk.response.output_text.strip())

    assert len(streamed_content) > 0
    assert expected.lower() in "".join(streamed_content).lower()

    retrieved_response = openai_client.responses.retrieve(response_id=response_id)
    assert retrieved_response.output_text == "".join(streamed_content)

    next_response = openai_client.responses.create(
        model=text_model_id,
        input="Repeat your previous response in all caps.",
        previous_response_id=response_id,
        stream=True,
    )
    next_streamed_content = []
    for chunk in next_response:
        next_streamed_content.append(chunk.response.output_text.strip())
    assert expected.upper() in "".join(next_streamed_content)
tests/integration/test_cases/openai/responses.json (new file, 26 lines)
@@ -0,0 +1,26 @@
{
  "non_streaming_01": {
    "data": {
      "question": "Which planet do humans live on?",
      "expected": "Earth"
    }
  },
  "non_streaming_02": {
    "data": {
      "question": "Which planet has rings around it with a name starting with letter S?",
      "expected": "Saturn"
    }
  },
  "streaming_01": {
    "data": {
      "question": "What's the name of the Sun in Latin?",
      "expected": "Sol"
    }
  },
  "streaming_02": {
    "data": {
      "question": "What is the name of the US capital?",
      "expected": "Washington"
    }
  }
}
tests/integration/test_cases/test_case.py
@@ -12,6 +12,7 @@ class TestCase:
     _apis = [
         "inference/chat_completion",
         "inference/completion",
+        "openai/responses",
     ]
     _jsonblob = {}

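The added "openai/responses" entry registers the fixture file above with the shared TestCase loader used by the integration tests. A small, illustrative sketch of how the parametrized ids in the new tests resolve against it; the loader's internals are not part of this diff, so the mapping described in the comments is an inference from the id format:

from tests.integration.test_cases.test_case import TestCase  # assumed absolute path; the tests import it relatively as ..test_cases.test_case

# The id "openai:responses:non_streaming_01" presumably selects the
# "openai/responses" api registered above, the openai/responses.json fixture,
# and its "non_streaming_01" entry, whose "data" block is exposed via item access.
tc = TestCase("openai:responses:non_streaming_01")
assert tc["question"] == "Which planet do humans live on?"
assert tc["expected"] == "Earth"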