Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-23 00:52:26 +00:00
chore: Enabling tests for Weaviate and some minor changes
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
parent 266e2afb9c
commit 01d90eb8ab
17 changed files with 216 additions and 103 deletions
@@ -11,3 +11,11 @@ class UnsupportedModelError(ValueError):
     def __init__(self, model_name: str, supported_models_list: list[str]):
         message = f"'{model_name}' model is not supported. Supported models are: {', '.join(supported_models_list)}"
         super().__init__(message)
+
+
+class ModelNotFoundError(ValueError):
+    """raised when Llama Stack cannot find a referenced model"""
+
+    def __init__(self, model_name: str) -> None:
+        message = f"Model '{model_name}' not found. Use client.models.list() to list available models."
+        super().__init__(message)
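For context, a minimal sketch of how the two exception classes above might be used when resolving a model. The _SUPPORTED_MODELS list, _REGISTERED_MODELS registry, and resolve_model helper are illustrative assumptions, not part of this diff; only the exception constructors come from the code above.

# Illustrative only: shows how UnsupportedModelError and ModelNotFoundError
# (added in the hunk above) could be raised by a hypothetical lookup helper.
_SUPPORTED_MODELS = ["llama-3.1-8b-instruct", "llama-3.1-70b-instruct"]  # example values
_REGISTERED_MODELS = {"llama-3.1-8b-instruct": {"provider": "example"}}  # example registry


def resolve_model(model_name: str) -> dict:
    # Reject models the stack does not support at all.
    if model_name not in _SUPPORTED_MODELS:
        raise UnsupportedModelError(model_name, _SUPPORTED_MODELS)
    # Supported but not registered: point the caller at client.models.list().
    model = _REGISTERED_MODELS.get(model_name)
    if model is None:
        raise ModelNotFoundError(model_name)
    return model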
@@ -16,7 +16,7 @@ from pydantic import BaseModel, Field
 from llama_stack.apis.inference import InterleavedContent
 from llama_stack.apis.vector_dbs import VectorDB
 from llama_stack.providers.utils.telemetry.trace_protocol import trace_protocol
-from llama_stack.providers.utils.vector_io.chunk_utils import generate_chunk_id
+from llama_stack.providers.utils.vector_io.vector_utils import generate_chunk_id
 from llama_stack.schema_utils import json_schema_type, webmethod
 from llama_stack.strong_typing.schema import register_schema
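The only change in this hunk is that generate_chunk_id is now imported from vector_utils instead of chunk_utils. As a rough sketch of the idea behind such a helper (the signature and hashing scheme here are assumptions for illustration, not the library's actual implementation), a deterministic chunk ID can be derived by hashing the parent document ID together with the chunk contents:

import hashlib
import uuid


# Assumed signature for illustration: derives a stable chunk ID from the
# parent document ID and the chunk's text, so re-ingesting identical content
# yields the same ID.
def generate_chunk_id_sketch(document_id: str, chunk_text: str) -> str:
    hashed = hashlib.sha256(f"{document_id}:{chunk_text}".encode("utf-8")).digest()
    # uuid.UUID(bytes=...) requires exactly 16 bytes, so take the first half of the digest.
    return str(uuid.UUID(bytes=hashed[:16]))

Called twice with the same inputs, e.g. generate_chunk_id_sketch("doc-1", "hello world"), this returns the same UUID string, which is the property a chunk-ID helper needs regardless of which module it lives in.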