Mirror of https://github.com/meta-llama/llama-stack.git
add info to upgrade run.yaml when the default embedding model is not found
This commit is contained in:
parent ee5066806e, commit d73575dd9d
1 changed file with 8 additions and 1 deletion
@@ -308,7 +308,14 @@ class MemoryBanksRoutingTable(CommonRoutingTableImpl, MemoryBanks):
                 )
         model = await self.get_object_by_identifier("model", params.embedding_model)
         if model is None:
-            raise ValueError(f"Model {params.embedding_model} not found")
+            if params.embedding_model == "all-MiniLM-L6-v2":
+                raise ValueError(
+                    "Embeddings are now served via Inference providers. "
+                    "Please upgrade your run.yaml to include inline::sentence-transformer as an additional inference provider. "
+                    "See https://github.com/meta-llama/llama-stack/blob/main/llama_stack/templates/together/run.yaml for an example."
+                )
+            else:
+                raise ValueError(f"Model {params.embedding_model} not found")
         if model.model_type != ModelType.embedding:
             raise ValueError(
                 f"Model {params.embedding_model} is not an embedding model"
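For readers who hit the new error: the message points at the together template for what the upgraded run.yaml should contain. The fragment below is a minimal sketch of that kind of addition, not taken from this commit; the provider_type spelling, the provider_id, and the embedding_dimension value are assumptions to be verified against the linked together/run.yaml.

providers:
  inference:
    # ... keep your existing inference provider(s) as they are ...
    - provider_id: sentence-transformers            # assumed id; any unique id works
      provider_type: inline::sentence-transformers  # verify exact spelling against the linked template
      config: {}
models:
  - model_id: all-MiniLM-L6-v2
    provider_id: sentence-transformers
    model_type: embedding
    metadata:
      embedding_dimension: 384                      # assumed; all-MiniLM-L6-v2 emits 384-dim vectors

With an entry like this, get_object_by_identifier("model", params.embedding_model) resolves to a registered embedding model, so neither ValueError branch in the hunk above is reached.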