feat: Enable setting a default embedding model in the stack
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
parent 007efa6eb5
commit 86c1e3b217

27 changed files with 435 additions and 403 deletions
@@ -159,6 +159,12 @@ def test_openai_create_vector_store(
     assert hasattr(vector_store, "created_at")


+def test_openai_create_vector_store_default(compat_client_with_empty_stores, client_with_models):
+    skip_if_provider_doesnt_support_openai_vector_stores(client_with_models)
+    vector_store = compat_client_with_empty_stores.vector_stores.create()
+    assert vector_store.id
+
+
 def test_openai_list_vector_stores(
     compat_client_with_empty_stores, client_with_models, embedding_model_id, embedding_dimension
 ):
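For context, a minimal sketch of what the new test exercises: when the stack has a default embedding model configured, an OpenAI-compatible client can create a vector store without naming an embedding model, whereas previously the caller had to supply one. The base URL, API key, and the extra_body keys below are assumptions for illustration only; they are not confirmed by this diff.

from openai import OpenAI

# Assumed endpoint for a locally running llama-stack distribution.
client = OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")

# With a default embedding model set in the stack, no embedding parameters are
# needed -- this mirrors what test_openai_create_vector_store_default asserts.
vs = client.vector_stores.create()
print(vs.id)

# Without a default, the caller names a model explicitly. The extra_body keys
# ("embedding_model", "embedding_dimension") and the model name are assumed
# here for illustration.
vs_explicit = client.vector_stores.create(
    extra_body={"embedding_model": "all-MiniLM-L6-v2", "embedding_dimension": 384},
)
print(vs_explicit.id)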