diff --git a/llama_stack/providers/utils/kvstore/config.py b/llama_stack/providers/utils/kvstore/config.py
index 0a6fae58b..b6132e03d 100644
--- a/llama_stack/providers/utils/kvstore/config.py
+++ b/llama_stack/providers/utils/kvstore/config.py
@@ -47,7 +47,7 @@ class RedisKVStoreConfig(CommonConfig):


 class SqliteKVStoreConfig(CommonConfig):
-    type: Literal[KVStoreType.sqlite] = KVStoreType.sqlite.value
+    type: Literal[KVStoreType.sqlite.value] = KVStoreType.sqlite.value
     db_path: str = Field(
         default=(RUNTIME_BASE_DIR / "kvstore.db").as_posix(),
         description="File path for the sqlite database",
@@ -63,7 +63,7 @@ class SqliteKVStoreConfig(CommonConfig):


 class PostgresKVStoreConfig(CommonConfig):
-    type: Literal[KVStoreType.postgres] = KVStoreType.postgres.value
+    type: Literal[KVStoreType.postgres.value] = KVStoreType.postgres.value
     host: str = "localhost"
     port: int = 5432
     db: str = "llamastack"
@@ -102,7 +102,7 @@ class PostgresKVStoreConfig(CommonConfig):


 class MongoDBKVStoreConfig(CommonConfig):
-    type: Literal[KVStoreType.mongodb] = KVStoreType.mongodb.value
+    type: Literal[KVStoreType.mongodb.value] = KVStoreType.mongodb.value
     host: str = "localhost"
     port: int = 27017
     db: str = "llamastack"
diff --git a/llama_stack/templates/ollama/ollama.py b/llama_stack/templates/ollama/ollama.py
index 07c9e5013..dde72f1b2 100644
--- a/llama_stack/templates/ollama/ollama.py
+++ b/llama_stack/templates/ollama/ollama.py
@@ -28,7 +28,7 @@ def get_distribution_template() -> DistributionTemplate:
         "telemetry": ["inline::meta-reference"],
         "eval": ["inline::meta-reference"],
         "datasetio": ["inline::localfs"],
-        "scoring": ["inline::basic", "inline::llm-as-judge"],
+        "scoring": ["inline::llm-as-judge"],
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
@@ -42,11 +42,6 @@ def get_distribution_template() -> DistributionTemplate:
         provider_type="remote::ollama",
         config=OllamaImplConfig.sample_run_config(),
     )
-    #vector_io_provider_faiss = Provider(
-    #    provider_id="faiss",
-    #    provider_type="inline::faiss",
-    #    config=FaissVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
-    #)
     inference_model = ModelInput(
         model_id="${env.INFERENCE_MODEL}",
         provider_id="ollama",
@@ -86,7 +81,6 @@ def get_distribution_template() -> DistributionTemplate:
             "run.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider],
-                    "vector_io": [vector_io_provider_faiss],
                 },
                 default_models=[inference_model, embedding_model],
                 default_tool_groups=default_tool_groups,
@@ -94,7 +88,6 @@ def get_distribution_template() -> DistributionTemplate:
             "run-with-safety.yaml": RunConfigSettings(
                 provider_overrides={
                     "inference": [inference_provider],
-                    "vector_io": [vector_io_provider_faiss]
                     "safety": [
                         Provider(
                             provider_id="llama-guard",
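
Note (editor's sketch, not part of the patch): the kvstore/config.py hunks switch each
"type" discriminator from Literal[<enum member>] to Literal[<enum member>.value], so the
annotation agrees with the string default already assigned on the right-hand side. Below
is a minimal sketch of the resulting pattern, assuming Pydantic v2 and using simplified
stand-ins for the real llama_stack classes:

    # Hypothetical, simplified from the patch; assumes Pydantic v2.
    from enum import Enum
    from typing import Literal

    from pydantic import BaseModel

    class KVStoreType(Enum):
        sqlite = "sqlite"

    class SqliteKVStoreConfig(BaseModel):
        # Literal[KVStoreType.sqlite.value] evaluates to Literal["sqlite"], so the
        # annotation, the plain-string default, and values parsed from YAML/JSON
        # all agree on the same string type.
        type: Literal[KVStoreType.sqlite.value] = KVStoreType.sqlite.value
        db_path: str = "kvstore.db"

    cfg = SqliteKVStoreConfig(type="sqlite")  # validates cleanly
    print(cfg.model_dump())  # {'type': 'sqlite', 'db_path': 'kvstore.db'}

The ollama.py hunks remove the dangling vector_io_provider_faiss references along with the
already-commented-out definition above them (as removed, the second reference even lacked a
trailing comma before "safety", so the pre-patch file appears not to have parsed), and drop
"inline::basic" from the template's scoring providers.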