update resolver to only pass vector_stores section of run config
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

Using Router only from VectorDBs
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

removing model_api from vector store providers
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

fix test
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

updating integration tests
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>

special handling for replay mode for available providers
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
parent 24a1430c8b
commit accc4c437e

46 changed files with 397 additions and 702 deletions
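The idea in the commit title, sketched below with stand-in names (this is not the repository's resolver code; only VectorStoresConfig and the vector_stores / default_embedding_model_id names appear in the diff further down): vector-store providers are handed just the vector_stores section of the run config instead of the whole config object.

from dataclasses import dataclass, field
from typing import Optional


# Stand-in config models; everything except the VectorStoresConfig name and the
# vector_stores / default_embedding_model_id fields is assumed for illustration.
@dataclass
class VectorStoresConfig:
    default_embedding_model_id: Optional[str] = None


@dataclass
class RunConfig:
    vector_stores: VectorStoresConfig = field(default_factory=VectorStoresConfig)
    # ... other sections (inference, providers, ...) omitted


def instantiate_vector_store_provider(provider_impl, run_config: RunConfig):
    # Before this change (roughly): provider_impl.configure(run_config)  # whole run config
    # After: only the vector_stores section is passed through.
    return provider_impl.configure(run_config.vector_stores)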
@@ -241,7 +241,7 @@ def instantiate_llama_stack_client(session):
     # --stack-config bypasses template so need this to set default embedding model
     if "vector_io" in config and "inference" in config:
         run_config.vector_stores = VectorStoresConfig(
-            default_embedding_model_id="inline::sentence-transformers/nomic-ai/nomic-embed-text-v1.5"
+            default_embedding_model_id="sentence-transformers/nomic-ai/nomic-embed-text-v1.5"
         )

     run_config_file = tempfile.NamedTemporaryFile(delete=False, suffix=".yaml")
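For readers without the fixture in front of them, a minimal standalone sketch of what the changed lines do; the stand-in pydantic models, import choices, and YAML dump below are assumptions, and only the field name and model id come from the hunk above.

import tempfile
from typing import Optional

import yaml
from pydantic import BaseModel


# Stand-ins for the repository's config models (not the real import path).
class VectorStoresConfig(BaseModel):
    default_embedding_model_id: Optional[str] = None


class RunConfig(BaseModel):
    vector_stores: Optional[VectorStoresConfig] = None


run_config = RunConfig()
# As in the hunk: the model id no longer carries the "inline::" provider prefix.
run_config.vector_stores = VectorStoresConfig(
    default_embedding_model_id="sentence-transformers/nomic-ai/nomic-embed-text-v1.5"
)

# As in the hunk's context: write the run config to a temporary YAML file.
with tempfile.NamedTemporaryFile(delete=False, suffix=".yaml", mode="w") as run_config_file:
    yaml.safe_dump(run_config.model_dump(), run_config_file)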