Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 20:14:13 +00:00
Several smaller fixes to make adapters work
Also reorganized the __init__ pattern inside providers so configuration can stay lightweight.
parent 2a1552a5eb
commit 45987996c4
23 changed files with 164 additions and 160 deletions
@@ -6,7 +6,7 @@
 import asyncio
 import json
-from typing import AsyncGenerator
+from typing import Any, AsyncGenerator

 import fire
 import httpx
@@ -26,7 +26,7 @@ from .api import (
 from .event_logger import EventLogger


-async def get_provider_impl(config: RemoteProviderConfig) -> Inference:
+async def get_client_impl(config: RemoteProviderConfig, _deps: Any) -> Inference:
     return InferenceClient(config.url)
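
The commit message describes reorganizing the __init__ pattern inside providers so configuration stays lightweight, and the hunk above shows the new get_client_impl(config, _deps) entry point. The following is a minimal, self-contained sketch of that pattern, not the repository's actual code: the url field, the stub InferenceClient class, and the __main__ demo are assumptions for illustration.

# Hypothetical, self-contained sketch of the "lightweight config" pattern the
# commit message describes: the provider exposes a small config object plus an
# async factory, and the heavyweight client is only constructed when the
# factory is awaited. Names follow the diff above.
import asyncio
from dataclasses import dataclass
from typing import Any


@dataclass
class RemoteProviderConfig:
    url: str  # the diff shows config.url being handed to the client


class InferenceClient:
    """Stand-in for the real client, which would live in its own module."""

    def __init__(self, base_url: str) -> None:
        self.base_url = base_url


async def get_client_impl(config: RemoteProviderConfig, _deps: Any) -> InferenceClient:
    # Mirrors the new signature in the diff: the extra _deps argument lets a
    # resolver pass shared dependencies without this provider needing any.
    return InferenceClient(config.url)


if __name__ == "__main__":
    config = RemoteProviderConfig(url="http://localhost:5000")
    client = asyncio.run(get_client_impl(config, _deps=None))
    print(client.base_url)  # -> http://localhost:5000

Keeping the config class importable without pulling in the client's runtime dependencies (httpx, event logging, etc.) is what allows configuration to stay lightweight for callers that only need the schema.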
|
Loading…
Add table
Add a link
Reference in a new issue