mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-08 13:00:52 +00:00
persist file batches and clean up after 7 days
This commit is contained in:
parent
943255697e
commit
9d2d8ab61c
3 changed files with 459 additions and 49 deletions
|
@ -18,6 +18,13 @@ from llama_stack.log import get_logger
|
|||
logger = get_logger(name=__name__, category="vector_io")
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def rate_limit_between_tests():
    """Autouse fixture that pauses after every integration test.

    The provider under test enforces API rate limits; sleeping during
    teardown spaces the tests out so consecutive runs don't get throttled.
    """
    # Setup: nothing to do — hand control to the test immediately.
    yield
    # Teardown: pause before the next test starts to avoid rate limiting.
    time.sleep(10)
|
||||
|
||||
|
||||
def skip_if_provider_doesnt_support_openai_vector_stores(client_with_models):
|
||||
vector_io_providers = [p for p in client_with_models.providers.list() if p.api == "vector_io"]
|
||||
for p in vector_io_providers:
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue