mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-06-28 02:53:30 +00:00
Some checks failed
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 2s
Integration Tests / test-matrix (http, post_training) (push) Failing after 9s
Integration Tests / test-matrix (http, agents) (push) Failing after 10s
Integration Tests / test-matrix (http, providers) (push) Failing after 8s
Integration Tests / test-matrix (http, inference) (push) Failing after 11s
Integration Tests / test-matrix (http, inspect) (push) Failing after 10s
Integration Tests / test-matrix (http, datasets) (push) Failing after 11s
Integration Tests / test-matrix (library, datasets) (push) Failing after 8s
Integration Tests / test-matrix (http, scoring) (push) Failing after 10s
Integration Tests / test-matrix (library, inference) (push) Failing after 8s
Integration Tests / test-matrix (library, agents) (push) Failing after 10s
Integration Tests / test-matrix (http, tool_runtime) (push) Failing after 11s
Integration Tests / test-matrix (library, inspect) (push) Failing after 8s
Test External Providers / test-external-providers (venv) (push) Failing after 7s
Integration Tests / test-matrix (library, post_training) (push) Failing after 9s
Integration Tests / test-matrix (library, scoring) (push) Failing after 8s
Integration Tests / test-matrix (library, tool_runtime) (push) Failing after 8s
Integration Tests / test-matrix (library, providers) (push) Failing after 9s
Unit Tests / unit-tests (3.11) (push) Failing after 7s
Unit Tests / unit-tests (3.10) (push) Failing after 7s
Unit Tests / unit-tests (3.12) (push) Failing after 8s
Unit Tests / unit-tests (3.13) (push) Failing after 8s
Update ReadTheDocs / update-readthedocs (push) Failing after 6s
Pre-commit / pre-commit (push) Successful in 53s
# What does this PR do? Added the Files provider to the fireworks template (TSIA). We might want to add it to all templates as a follow-up. ## Test Plan Run `pytest tests/unit/files/test_files.py`, then `llama stack build --template fireworks --image-type conda --run`, and finally `LLAMA_STACK_CONFIG=http://localhost:8321 pytest -s -v tests/integration/files/`.
51 lines
1.7 KiB
Python
51 lines
1.7 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from io import BytesIO
|
|
|
|
|
|
def test_openai_client_basic_operations(openai_client):
|
|
"""Test basic file operations through OpenAI client."""
|
|
client = openai_client
|
|
|
|
test_content = b"files test content"
|
|
|
|
try:
|
|
# Upload file using OpenAI client
|
|
with BytesIO(test_content) as file_buffer:
|
|
file_buffer.name = "openai_test.txt"
|
|
uploaded_file = client.files.create(file=file_buffer, purpose="assistants")
|
|
|
|
# Verify basic response structure
|
|
assert uploaded_file.id.startswith("file-")
|
|
assert hasattr(uploaded_file, "filename")
|
|
|
|
# List files
|
|
files_list = client.files.list()
|
|
file_ids = [f.id for f in files_list.data]
|
|
assert uploaded_file.id in file_ids
|
|
|
|
# Retrieve file info
|
|
retrieved_file = client.files.retrieve(uploaded_file.id)
|
|
assert retrieved_file.id == uploaded_file.id
|
|
|
|
# Retrieve file content - OpenAI client returns httpx Response object
|
|
content_response = client.files.content(uploaded_file.id)
|
|
# The response is an httpx Response object with .content attribute containing bytes
|
|
content = content_response.content
|
|
assert content == test_content
|
|
|
|
# Delete file
|
|
delete_response = client.files.delete(uploaded_file.id)
|
|
assert delete_response.deleted is True
|
|
|
|
except Exception as e:
|
|
# Cleanup in case of failure
|
|
try:
|
|
client.files.delete(uploaded_file.id)
|
|
except Exception:
|
|
pass
|
|
raise e
|