Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 07:14:20 +00:00)
minor fixes
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
parent d432445011
commit c85397589f

2 changed files with 1 addition and 1 deletion
@@ -42,7 +42,6 @@ VECTOR_INDEX_PREFIX = f"vector_index:milvus:{VERSION}::"
OPENAI_VECTOR_STORES_PREFIX = f"openai_vector_stores:milvus:{VERSION}::"
OPENAI_VECTOR_STORES_FILES_PREFIX = f"openai_vector_stores_files:milvus:{VERSION}::"
OPENAI_VECTOR_STORES_FILES_CONTENTS_PREFIX = f"openai_vector_stores_files_contents:milvus:{VERSION}::"
from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id


def sanitize_collection_name(name: str) -> str:
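The hunk above shows only the version-scoped key prefixes and the signature of sanitize_collection_name. As a minimal sketch of how such pieces typically fit together (VERSION, the regex-based sanitizer body, and the openai_vector_store_key helper are illustrative assumptions, not the repository's actual implementation):

import re

# Assumed placeholder values for illustration; the real VERSION and prefix
# come from the module shown in the hunk above.
VERSION = "v3"
OPENAI_VECTOR_STORES_PREFIX = f"openai_vector_stores:milvus:{VERSION}::"


def sanitize_collection_name(name: str) -> str:
    # Illustrative sketch only: Milvus collection names are restricted to
    # letters, digits, and underscores, so replace anything else.
    return re.sub(r"[^a-zA-Z0-9_]", "_", name)


def openai_vector_store_key(store_id: str) -> str:
    # Hypothetical helper showing how the version-scoped prefix namespaces
    # per-store records in the KV store, e.g.
    # "openai_vector_stores:milvus:v3::<store_id>".
    return f"{OPENAI_VECTOR_STORES_PREFIX}{store_id}"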
@@ -296,6 +296,7 @@ def test_response_non_streaming_file_search(
    if isinstance(openai_client, LlamaStackAsLibraryClient):
        pytest.skip("Responses API file search is not yet supported in library client.")

    print(f"list models = {openai_client.models.list()}")
    test_name_base = get_base_test_name(request)
    if should_skip_test(verification_config, provider, model, test_name_base):
        pytest.skip(f"Skipping {test_name_base} for model {model} on provider {provider} based on config.")
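The guard in this hunk follows a standard pytest early-skip pattern: detect an unsupported client type and skip instead of failing. A minimal, self-contained sketch of that pattern, assuming a stub class and fixture (the real suite imports LlamaStackAsLibraryClient and builds openai_client quite differently):

import pytest


class LlamaStackAsLibraryClient:
    # Stand-in for the real class imported by the test module; defined here
    # only so the sketch is self-contained.
    pass


@pytest.fixture
def openai_client():
    # The real suite parametrizes this fixture over remote and library
    # clients; a stub instance is returned here for illustration.
    return LlamaStackAsLibraryClient()


def test_file_search_guard(openai_client):
    # Same early-exit pattern as the added guard above: skip rather than fail
    # when the client type cannot exercise Responses API file search.
    if isinstance(openai_client, LlamaStackAsLibraryClient):
        pytest.skip("Responses API file search is not yet supported in library client.")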