Added Ollama as an inference impl (#20)

* Fix non-streaming API in inference server

* Add unit test for inline inference

* Add non-streaming Ollama inference impl

* Add streaming support for Ollama inference, with tests

* Address review comments

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Hardik Shah, 2024-07-31 22:08:37 -07:00, committed by GitHub
parent c253c1c9ad
commit 156bfa0e15
9 changed files with 921 additions and 33 deletions

@@ -23,6 +23,7 @@ from .datatypes import QuantizationConfig
 class ImplType(Enum):
     inline = "inline"
     remote = "remote"
+    ollama = "ollama"
 
 
 @json_schema_type
@@ -80,10 +81,17 @@ class RemoteImplConfig(BaseModel):
     url: str = Field(..., description="The URL of the remote module")
 
 
 @json_schema_type
+class OllamaImplConfig(BaseModel):
+    impl_type: Literal[ImplType.ollama.value] = ImplType.ollama.value
+    model: str = Field(..., description="The name of the model in ollama catalog")
+    url: str = Field(..., description="The URL for the ollama server")
+
+
+@json_schema_type
 class InferenceConfig(BaseModel):
     impl_config: Annotated[
-        Union[InlineImplConfig, RemoteImplConfig],
+        Union[InlineImplConfig, RemoteImplConfig, OllamaImplConfig],
         Field(discriminator="impl_type"),
     ]
 
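
For reference, a minimal runnable sketch of the discriminated union this diff introduces. The InlineImplConfig and RemoteImplConfig bodies are trimmed to just their impl_type tags, and the model name and URL below are illustrative assumptions, not values from this commit: pydantic reads the impl_type field and routes validation to the matching config class.

from enum import Enum
from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field


class ImplType(Enum):
    inline = "inline"
    remote = "remote"
    ollama = "ollama"


# Trimmed stand-ins for the real configs; the actual InlineImplConfig
# carries model/checkpoint/quantization fields not shown here.
class InlineImplConfig(BaseModel):
    impl_type: Literal[ImplType.inline.value] = ImplType.inline.value


class RemoteImplConfig(BaseModel):
    impl_type: Literal[ImplType.remote.value] = ImplType.remote.value
    url: str


class OllamaImplConfig(BaseModel):
    impl_type: Literal[ImplType.ollama.value] = ImplType.ollama.value
    model: str = Field(..., description="The name of the model in ollama catalog")
    url: str = Field(..., description="The URL for the ollama server")


class InferenceConfig(BaseModel):
    impl_config: Annotated[
        Union[InlineImplConfig, RemoteImplConfig, OllamaImplConfig],
        Field(discriminator="impl_type"),
    ]


# "ollama" in impl_type selects OllamaImplConfig during validation; the
# model name is an example, and 11434 is ollama's default server port.
cfg = InferenceConfig(
    impl_config={
        "impl_type": "ollama",
        "model": "llama2:7b",
        "url": "http://localhost:11434",
    }
)
assert isinstance(cfg.impl_config, OllamaImplConfig)

Keying the union on impl_type means adding another backend only requires a new ImplType member plus a tagged config class in the Union; a config with an unknown or missing tag fails validation rather than silently matching some variant.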