mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-04 04:04:14 +00:00
bunch more work to make adapters work
This commit is contained in:
parent
68f3db62e9
commit
c4fe72c3a3
20 changed files with 461 additions and 173 deletions
|
@ -6,24 +6,23 @@
|
|||
|
||||
import asyncio
|
||||
|
||||
# import json
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import fire
|
||||
import httpx
|
||||
|
||||
# from termcolor import cprint
|
||||
from llama_toolchain.distribution.datatypes import RemoteProviderConfig
|
||||
|
||||
from .api import * # noqa: F403
|
||||
|
||||
|
||||
async def get_client_impl(base_url: str):
    """Build a ``MemoryClient`` pointed at *base_url*.

    Factory entry point used by the distribution machinery to obtain a
    client-side implementation of the Memory API.
    """
    client = MemoryClient(base_url)
    return client
|
||||
async def get_adapter_impl(config: RemoteProviderConfig) -> Memory:
    """Build the remote Memory adapter described by *config*.

    Extracts the endpoint URL from the provider config and hands it to
    ``MemoryClient``, which proxies Memory API calls to that endpoint.
    """
    endpoint = config.url
    return MemoryClient(endpoint)
|
||||
|
||||
|
||||
class MemoryClient(Memory):
|
||||
def __init__(self, base_url: str):
|
||||
print(f"Initializing client for {base_url}")
|
||||
print(f"Memory passthrough to -> {base_url}")
|
||||
self.base_url = base_url
|
||||
|
||||
async def initialize(self) -> None:
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue