fix prompt guard (#177)
Several other fixes to configure. Add support for 1b/3b models in ollama.
parent b9b1e8b08b
commit 210b71b0ba
11 changed files with 50 additions and 45 deletions
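
The commit message mentions adding 1B/3B model support in the ollama adapter, but the mapping itself lives in a file not shown in the hunk below. A hedged sketch of what such a descriptor-to-tag mapping might look like follows; OLLAMA_SUPPORTED_MODELS, resolve_ollama_model, and the tag strings are illustrative assumptions, not the commit's actual values.

    # Hedged sketch only: illustrative mapping from llama-stack model
    # descriptors to ollama tags; names and strings are assumptions.
    OLLAMA_SUPPORTED_MODELS = {
        "Llama3.2-1B-Instruct": "llama3.2:1b-instruct-fp16",
        "Llama3.2-3B-Instruct": "llama3.2:3b-instruct-fp16",
    }


    def resolve_ollama_model(descriptor: str) -> str:
        """Map a llama-stack model descriptor to an ollama tag (illustrative)."""
        if descriptor not in OLLAMA_SUPPORTED_MODELS:
            raise ValueError(f"{descriptor} is not served by this adapter")
        return OLLAMA_SUPPORTED_MODELS[descriptor]
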
@@ -7,6 +7,10 @@
 from llama_stack.distribution.datatypes import RemoteProviderConfig
 
 
+class OllamaImplConfig(RemoteProviderConfig):
+    port: int = 11434
+
+
 async def get_adapter_impl(config: RemoteProviderConfig, _deps):
     from .ollama import OllamaInferenceAdapter
 
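
The visible hunk adds an OllamaImplConfig that defaults to Ollama's standard port. A minimal sketch of how that default might be exercised is below; the host field assumed on RemoteProviderConfig is not confirmed by this hunk and may differ across llama-stack versions.

    # Minimal sketch, assuming RemoteProviderConfig is a pydantic model with a
    # `host` field (an assumption; only `port` is visible in the hunk above).
    from llama_stack.distribution.datatypes import RemoteProviderConfig


    class OllamaImplConfig(RemoteProviderConfig):
        # Added in this commit: fall back to Ollama's standard port.
        port: int = 11434


    config = OllamaImplConfig(host="localhost")
    print(config.port)  # 11434 unless overridden by the distribution config
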