Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-09 05:08:37 +00:00)
instantiate inference models

commit 7071c46422 (parent d2ec822b12)

6 changed files with 40 additions and 20 deletions
@@ -9,9 +9,9 @@ from typing import Any, List, Tuple
 from llama_stack.distribution.datatypes import Api


-async def get_router_impl(inner_impls: List[Tuple[str, Any]], deps: List[Api]):
+async def get_router_impl(models_api: Api):
     from .inference import InferenceRouterImpl

-    impl = InferenceRouterImpl(inner_impls, deps)
+    impl = InferenceRouterImpl(models_api)
     await impl.initialize()
     return impl
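The factory now takes only the models API handle instead of a list of inner implementations plus their dependencies. For orientation, the following is a minimal Python sketch, not the actual code in the .inference module (that module is among the other changed files not shown in this hunk), of the interface the call site above assumes from InferenceRouterImpl: a constructor accepting models_api and an async initialize() hook.

class InferenceRouterImpl:
    """Minimal stand-in showing only the surface area the factory relies on."""

    def __init__(self, models_api) -> None:
        # Keep a handle to the models API; routing decisions would be made by
        # looking up registered models through this object (sketch only).
        self.models_api = models_api

    async def initialize(self) -> None:
        # Async setup hook awaited by get_router_impl before the router is
        # handed back to callers; placeholder body in this sketch.
        pass

With that shape, get_router_impl stays a thin async factory: it imports the class lazily, constructs it with models_api, awaits initialize(), and returns the instance, exactly as the unchanged lines of the hunk show.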