Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-27 20:50:24 +00:00)
chore: Enabling Milvus for VectorIO CI

Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

Parent: 709eb7da33
Commit: c8d41d45ec

115 changed files with 2919 additions and 184 deletions
@@ -4,7 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 import pytest
-from openai import OpenAI
+from openai import BadRequestError, OpenAI
 
 from llama_stack.distribution.library_client import LlamaStackAsLibraryClient
 
@@ -92,6 +92,13 @@ def test_responses_store(openai_client, client_with_models, text_model_id, strea
     if output_type == "message":
         assert retrieved_response.output[0].content[0].text == content
 
+    # Delete the response
+    delete_response = client.responses.delete(response_id)
+    assert delete_response is None
+
+    with pytest.raises(BadRequestError):
+        client.responses.retrieve(response_id)
+
 
 def test_list_response_input_items(openai_client, client_with_models, text_model_id):
     """Test the new list_openai_response_input_items endpoint."""
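The added assertions exercise the Responses delete endpoint: deleting a stored response returns no body, and a subsequent retrieve of the same id raises BadRequestError. A minimal standalone sketch of the same flow against a running Llama Stack server follows; the base_url, api_key, and model id are illustrative assumptions rather than values taken from this commit, and a plain try/except stands in for pytest.raises outside the test suite.

# Sketch only: create a response, delete it, then confirm retrieval fails.
# The base_url, api_key, and model below are assumptions for illustration.
from openai import BadRequestError, OpenAI

client = OpenAI(
    base_url="http://localhost:8321/v1/openai/v1",  # assumed Llama Stack OpenAI-compatible endpoint
    api_key="none",  # placeholder; not validated in this sketch
)

response = client.responses.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # assumed model id
    input="Say hello in one short sentence.",
)

# Deleting the response returns no body, mirroring the `assert delete_response is None` check in the test.
deleted = client.responses.delete(response.id)
assert deleted is None

# Retrieving the deleted response should now fail with BadRequestError.
try:
    client.responses.retrieve(response.id)
except BadRequestError:
    print("retrieval after delete failed as expected")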