Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 02:53:30 +00:00)
Added Ollama as an inference impl (#20)
* fix non-streaming api in inference server
* unit test for inline inference
* Added non-streaming ollama inference impl
* add streaming support for ollama inference with tests (see the sketch below)
* addressing comments

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
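The streaming bullet is the substantive part of the change: Ollama's /api/generate endpoint emits newline-delimited JSON chunks when "stream" is true, and a streaming inference impl has to consume those chunks and re-yield them. The loop below is a minimal sketch of that pattern, not the code from this commit; the function name stream_ollama, the model tag, and the server URL are illustrative assumptions.

# Minimal sketch (not this commit's code): stream tokens from a local
# Ollama server. While streaming, /api/generate returns one JSON object
# per line; each carries a "response" text fragment, and the final chunk
# sets "done" to true.
import asyncio
import json

import httpx


async def stream_ollama(prompt: str, model: str = "llama2"):
    async with httpx.AsyncClient() as client:
        async with client.stream(
            "POST",
            "http://localhost:11434/api/generate",  # default Ollama address
            json={"model": model, "prompt": prompt, "stream": True},
            timeout=None,  # generation can be slow; disable the read timeout
        ) as response:
            async for line in response.aiter_lines():
                if not line:
                    continue
                chunk = json.loads(line)
                yield chunk.get("response", "")
                if chunk.get("done", False):
                    break


async def main():
    async for piece in stream_ollama("Why is the sky blue?"):
        print(piece, end="", flush=True)


asyncio.run(main())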
Parent: c253c1c9ad
Commit: 156bfa0e15
9 changed files with 921 additions and 33 deletions
@@ -12,6 +12,10 @@ async def get_inference_api_instance(config: InferenceConfig):
         from .inference import InferenceImpl
 
         return InferenceImpl(config.impl_config)
+    elif config.impl_config.impl_type == ImplType.ollama.value:
+        from .ollama import OllamaInference
+
+        return OllamaInference(config.impl_config)
 
     from .client import InferenceClient
 
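For context, here is a sketch of how the new branch might be exercised. The import paths and the shape of OllamaImplConfig are assumptions (the page renders only this one hunk of the nine changed files); only InferenceConfig, ImplType, and get_inference_api_instance appear in the diff itself.

# Hypothetical usage of the factory above. Module paths and OllamaImplConfig
# fields are assumptions; the diff only shows that
# config.impl_config.impl_type is compared against ImplType.ollama.value.
import asyncio

from inference.api_instance import get_inference_api_instance  # assumed path
from inference.config import ImplType, InferenceConfig, OllamaImplConfig  # assumed path


async def main():
    config = InferenceConfig(
        impl_config=OllamaImplConfig(
            impl_type=ImplType.ollama.value,
            url="http://localhost:11434",  # default Ollama server (assumption)
        )
    )
    impl = await get_inference_api_instance(config)  # dispatches to OllamaInference
    print(type(impl).__name__)  # -> "OllamaInference"


asyncio.run(main())

Note the design choice visible in the hunk: each branch imports its implementation lazily, so an optional backend (here, the Ollama one) is only imported when its impl_type is actually selected.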