Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2025-08-06 10:42:39 +00:00

Commit b3060ce8a1 ("delete")
Parent commit: 8a576d7d72
4 changed files with 7 additions and 13 deletions
@@ -14,7 +14,6 @@ from .routing_tables import (
    BenchmarksRoutingTable,
    DatasetsRoutingTable,
    ModelsRoutingTable,
    ScoringFunctionsRoutingTable,
    ShieldsRoutingTable,
    ToolGroupsRoutingTable,
    VectorDBsRoutingTable,

@@ -47,10 +46,8 @@ async def get_routing_table_impl(
async def get_auto_router_impl(api: Api, routing_table: RoutingTable, deps: Dict[str, Any]) -> Any:
    from .routers import (
        DatasetIORouter,
        EvalRouter,
        InferenceRouter,
        SafetyRouter,
        ScoringRouter,
        ToolRuntimeRouter,
        VectorIORouter,
    )

@@ -60,8 +57,6 @@ async def get_auto_router_impl(api: Api, routing_table: RoutingTable, deps: Dict
        "inference": InferenceRouter,
        "safety": SafetyRouter,
        "datasetio": DatasetIORouter,
        "scoring": ScoringRouter,
        "eval": EvalRouter,
        "tool_runtime": ToolRuntimeRouter,
    }
    api_to_deps = {
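The hunks above cover the routing-table imports and the auto-router factory. As a rough, self-contained sketch of the dispatch pattern these lines show, with simplified placeholder classes standing in for llama-stack's real routers and routing table (an illustration, not the project's actual code):

import asyncio
from typing import Any, Dict


class RoutingTable:
    """Placeholder for the shared routing table the routers wrap."""


class InferenceRouter:
    def __init__(self, routing_table: RoutingTable) -> None:
        self.routing_table = routing_table


class SafetyRouter:
    def __init__(self, routing_table: RoutingTable) -> None:
        self.routing_table = routing_table


async def get_auto_router_impl(api: str, routing_table: RoutingTable, deps: Dict[str, Any]) -> Any:
    # Map each API name to the router class that fronts it, then build the
    # matching router around the shared routing table.
    api_to_routers = {
        "inference": InferenceRouter,
        "safety": SafetyRouter,
    }
    if api not in api_to_routers:
        raise ValueError(f"unknown API: {api}")
    return api_to_routers[api](routing_table)


# Usage: pick the router for the "inference" API.
router = asyncio.run(get_auto_router_impl("inference", RoutingTable(), deps={}))
print(type(router).__name__)  # InferenceRouter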
@@ -20,7 +20,6 @@ async def get_provider_impl(
        config,
        deps[Api.datasetio],
        deps[Api.datasets],
        deps[Api.scoring],
        deps[Api.inference],
        deps[Api.agents],
    )
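The single hunk above is a provider factory passing its configuration and dependency APIs into the implementation. A minimal sketch of that pattern, assuming a deps dict keyed by an Api enum (the class and field names here are simplified placeholders, not the actual provider):

import asyncio
from enum import Enum
from typing import Any, Dict


class Api(Enum):
    # Placeholder subset of the dependency APIs named in the hunk above.
    datasetio = "datasetio"
    scoring = "scoring"
    inference = "inference"


class EvalImpl:
    """Placeholder implementation that holds its config and dependency APIs."""

    def __init__(self, config: Dict[str, Any], datasetio_api: Any, scoring_api: Any, inference_api: Any) -> None:
        self.config = config
        self.datasetio_api = datasetio_api
        self.scoring_api = scoring_api
        self.inference_api = inference_api

    async def initialize(self) -> None:
        # Real providers open connections / load resources here.
        pass


async def get_provider_impl(config: Dict[str, Any], deps: Dict[Api, Any]) -> EvalImpl:
    impl = EvalImpl(
        config,
        deps[Api.datasetio],
        deps[Api.scoring],
        deps[Api.inference],
    )
    await impl.initialize()
    return impl


# Usage with dummy dependencies.
impl = asyncio.run(get_provider_impl({"num_examples": 5}, {api: object() for api in Api}))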
@@ -5,14 +5,12 @@
# the root directory of this source tree.

from enum import Enum
from typing import Any, Dict, List

from llama_stack.apis.common.type_system import (
    ChatCompletionInputType,
    CompletionInputType,
    StringType,
)
from llama_stack.distribution.datatypes import Api


class ColumnName(Enum):
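The hunk above belongs to a constants module that defines a ColumnName enum next to column types imported from llama_stack.apis.common.type_system. A hedged sketch of how such an enum can describe an expected dataset schema; the enum members and the schema mapping below are hypothetical examples, and StringType is a local stand-in for the imported type:

from enum import Enum


class StringType:
    """Local stand-in for llama_stack.apis.common.type_system.StringType."""

    type: str = "string"


class ColumnName(Enum):
    # Hypothetical column names, for illustration only.
    input_query = "input_query"
    generated_answer = "generated_answer"


# A schema maps each required column to the value type expected in that column.
EXPECTED_SCHEMA = {
    ColumnName.input_query.value: StringType(),
    ColumnName.generated_answer.value: StringType(),
}

print(sorted(EXPECTED_SCHEMA))  # ['generated_answer', 'input_query']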
@@ -30,12 +30,14 @@ from llama_stack.providers.remote.vector_io.pgvector.config import (
from llama_stack.providers.utils.inference.model_registry import ProviderModelEntry
from llama_stack.templates.template import (
    DistributionTemplate,
    RunConfigSettings,
    get_model_registry,
    RunConfigSettings,
)


def get_inference_providers() -> Tuple[List[Provider], Dict[str, List[ProviderModelEntry]]]:
def get_inference_providers() -> (
    Tuple[List[Provider], Dict[str, List[ProviderModelEntry]]]
):
    # in this template, we allow each API key to be optional
    providers = [
        (

@@ -116,7 +118,9 @@ def get_distribution_template() -> DistributionTemplate:
        Provider(
            provider_id="sqlite-vec",
            provider_type="inline::sqlite-vec",
            config=SQLiteVectorIOConfig.sample_run_config(f"~/.llama/distributions/{name}"),
            config=SQLiteVectorIOConfig.sample_run_config(
                f"~/.llama/distributions/{name}"
            ),
        ),
        Provider(
            provider_id="${env.ENABLE_CHROMADB+chromadb}",

@@ -208,7 +212,6 @@ def get_distribution_template() -> DistributionTemplate:
    ]

    # TODO(xiyan): fix this back as registerable resources
    default_benchmarks = []
    # default_benchmarks = [
    #     BenchmarkInput(
    #         benchmark_id="meta-reference-simpleqa",

@@ -255,7 +258,6 @@ def get_distribution_template() -> DistributionTemplate:
                default_tool_groups=default_tool_groups,
                default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
                default_datasets=default_datasets,
                default_benchmarks=default_benchmarks,
            ),
        },
        run_config_env_vars={