Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 12:07:34 +00:00)
Several smaller fixes to make adapters work
Also reorganized the __init__ pattern inside providers so configuration can stay lightweight.
Parent: 2a1552a5eb
Commit: 45987996c4
23 changed files with 164 additions and 160 deletions
@@ -6,7 +6,7 @@
 import asyncio

-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional

 import fire
 import httpx

@@ -16,7 +16,7 @@ from llama_toolchain.distribution.datatypes import RemoteProviderConfig
 from .api import * # noqa: F403


-async def get_provider_impl(config: RemoteProviderConfig) -> Memory:
+async def get_client_impl(config: RemoteProviderConfig, _deps: Any) -> Memory:
     return MemoryClient(config.url)
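For context, here is a minimal sketch of the reorganized provider __init__ pattern the commit description refers to: the module that callers import for configuration pulls in only the config types, while the actual client (and heavier dependencies such as httpx) is imported lazily inside the factory. The module layout and the deferred import below are assumptions for illustration; only the get_client_impl(config, _deps) signature and the MemoryClient(config.url) construction come from this diff.

    # Hypothetical provider __init__.py; layout assumed, not taken verbatim from this commit.
    from typing import Any

    from llama_toolchain.distribution.datatypes import RemoteProviderConfig


    async def get_client_impl(config: RemoteProviderConfig, _deps: Any):
        # Deferred import keeps "import the provider for its config" cheap:
        # the client module (httpx, fire, ...) only loads when an
        # implementation is actually constructed.
        from .client import MemoryClient  # assumed module path

        return MemoryClient(config.url)

With this shape, distribution code can import the provider package to read and validate RemoteProviderConfig without paying for the client's runtime dependencies, and only calls get_client_impl when it needs a live Memory client.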