mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-08-15 14:08:00 +00:00
commit 713eae9b91 (parent c3bd927c01)

    fix

2 changed files with 14 additions and 6 deletions
.github/workflows/integration-tests.yml (vendored, 3 changes)

@@ -52,7 +52,8 @@ jobs:
       run: |
         # Get test directories dynamically, excluding non-test directories
         # NOTE: we are excluding post_training since the tests take too long
-        TEST_TYPES=$(find tests/integration -maxdepth 1 -mindepth 1 -type d -printf "%f\n" |
+        TEST_TYPES=$(find tests/integration -maxdepth 1 -mindepth 1 -type d |
+          sed 's|tests/integration/||' |
           grep -Ev "^(__pycache__|fixtures|test_cases|recordings|non_ci|post_training)$" |
           sort | jq -R -s -c 'split("\n")[:-1]')
         echo "test-types=$TEST_TYPES" >> $GITHUB_OUTPUT
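GNU find's `-printf "%f\n"` is not available in the BSD find that ships with macOS runners, which is presumably what this fix works around: the new pipeline takes plain `find` output and strips the `tests/integration/` prefix with `sed` instead. As an illustration (not part of the commit), a rough Python equivalent of what the revised pipeline computes:

```python
# Rough Python equivalent of the revised shell pipeline (illustration only):
# list the immediate subdirectories of tests/integration, drop the excluded
# names, sort, and emit a compact JSON array like the jq step does.
import json
from pathlib import Path

EXCLUDED = {"__pycache__", "fixtures", "test_cases", "recordings", "non_ci", "post_training"}


def test_types(root: str = "tests/integration") -> str:
    names = sorted(p.name for p in Path(root).iterdir() if p.is_dir() and p.name not in EXCLUDED)
    return json.dumps(names, separators=(",", ":"))
```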
@@ -9,10 +9,11 @@ import time
 from io import BytesIO

 import pytest
-from llama_stack_client import BadRequestError, LlamaStackClient
+from llama_stack_client import BadRequestError
 from openai import BadRequestError as OpenAIBadRequestError

 from llama_stack.apis.vector_io import Chunk
+from llama_stack.core.library_client import LlamaStackAsLibraryClient

 logger = logging.getLogger(__name__)
@@ -638,7 +639,7 @@ def test_openai_vector_store_list_files_invalid_vector_store(compat_client_with_empty_stores
     skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)

     compat_client = compat_client_with_empty_stores
-    if isinstance(compat_client, LlamaStackClient):
+    if isinstance(compat_client, LlamaStackAsLibraryClient):
         errors = ValueError
     else:
         errors = (BadRequestError, OpenAIBadRequestError)
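This pairs with the import change above: the branch now keys off LlamaStackAsLibraryClient (the in-process library mode) rather than LlamaStackClient (the HTTP client). A minimal sketch of the selection logic, assuming the imports from this commit; `expected_errors` is a hypothetical helper name, not part of the test file:

```python
# Sketch of the branch above: in library mode the stack runs in-process, so
# invalid input surfaces directly as ValueError; over HTTP the same failure
# comes back as a 400 and is raised as BadRequestError by either SDK.
from llama_stack_client import BadRequestError
from openai import BadRequestError as OpenAIBadRequestError

from llama_stack.core.library_client import LlamaStackAsLibraryClient


def expected_errors(compat_client):
    if isinstance(compat_client, LlamaStackAsLibraryClient):
        return ValueError
    return (BadRequestError, OpenAIBadRequestError)
```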
@@ -678,9 +679,15 @@ def test_openai_vector_store_retrieve_file_contents(compat_client_with_empty_stores
         file_id=file.id,
     )

-    assert file_contents
-    assert file_contents.content[0].type == "text"
-    assert file_contents.content[0].text == test_content.decode("utf-8")
+    assert file_contents is not None
+    assert len(file_contents.content) == 1
+    content = file_contents.content[0]
+
+    # llama-stack-client returns a model, openai-python is a badboy and returns a dict
+    if not isinstance(content, dict):
+        content = content.model_dump()
+    assert content["type"] == "text"
+    assert content["text"] == test_content.decode("utf-8")
     assert file_contents.filename == file_name
     assert file_contents.attributes == attributes
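The new assertions normalize the content part before checking it, because the two client libraries return different shapes for the same payload. A self-contained sketch of that pattern, with TextPart as a hypothetical stand-in for the real content-part model:

```python
# Self-contained sketch of the normalization above: llama-stack-client
# returns Pydantic models for content parts, openai-python returns plain
# dicts; coercing both to a dict lets one set of assertions cover both.
# TextPart is a hypothetical stand-in, not the real model class.
from pydantic import BaseModel


class TextPart(BaseModel):
    type: str
    text: str


def as_dict(part):
    # Pydantic v2 models expose model_dump(); dicts pass through unchanged.
    return part if isinstance(part, dict) else part.model_dump()


assert as_dict(TextPart(type="text", text="hello")) == {"type": "text", "text": "hello"}
assert as_dict({"type": "text", "text": "hello"}) == {"type": "text", "text": "hello"}
```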