Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
Several smaller fixes to make adapters work
Also, reorganized the pattern of __init__ inside providers so configuration can stay lightweight
parent 2a1552a5eb
commit 45987996c4
23 changed files with 164 additions and 160 deletions
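The note about keeping provider configuration lightweight suggests a pattern in which a provider package's __init__.py exposes only the config schema plus a small factory, and defers importing the heavy implementation until the provider is actually constructed. The sketch below illustrates that idea only; the package layout, ExampleSafetyConfig fields, and the .impl module are hypothetical and not taken from this commit.

# Hypothetical providers/example_safety/__init__.py -- illustrative only.
# Importing this module stays cheap: it defines the config schema and a factory,
# and postpones the heavy implementation import until construction time.
from typing import Any

from pydantic import BaseModel


class ExampleSafetyConfig(BaseModel):
    # Hypothetical config fields; a real provider defines its own schema.
    model: str = "example-safety-model"
    temperature: float = 0.0


async def get_provider_impl(config: ExampleSafetyConfig, _deps: Any):
    # Heavy dependencies are imported lazily here, so tooling that only needs
    # to read or validate configs never pays for them.
    from .impl import ExampleSafetyImpl  # hypothetical implementation module

    impl = ExampleSafetyImpl(config)
    await impl.initialize()
    return impl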
@@ -6,11 +6,12 @@
 import asyncio
+from typing import Any

 import fire
 import httpx

 from llama_models.llama3.api.datatypes import UserMessage

 from pydantic import BaseModel
 from termcolor import cprint

@@ -19,7 +20,7 @@ from llama_toolchain.distribution.datatypes import RemoteProviderConfig
 from .api import *  # noqa: F403


-async def get_provider_impl(config: RemoteProviderConfig) -> Safety:
+async def get_client_impl(config: RemoteProviderConfig, _deps: Any) -> Safety:
     return SafetyClient(config.url)
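For context, here is a rough usage sketch of the renamed entry point. It assumes this hunk lives in the safety client module (something like llama_toolchain.safety.client, which is not shown in the extract) and that the distribution layer supplies the RemoteProviderConfig; passing None for _deps is an assumption, since the client path in the diff never uses it.

from llama_toolchain.distribution.datatypes import RemoteProviderConfig

# Assumed module path for the file shown in this hunk; adjust if it differs.
from llama_toolchain.safety.client import get_client_impl


async def connect_safety(config: RemoteProviderConfig):
    # The renamed entry point returns a SafetyClient bound to config.url rather
    # than constructing an in-process provider implementation.
    return await get_client_impl(config, None)  # None for _deps: the client path ignores it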