Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 15:23:51 +00:00)
Undo ollama commenting lol
parent 21b844c155
commit 2d7ce81302
1 changed file with 9 additions and 9 deletions
@@ -30,21 +30,21 @@ OLLAMA_SUPPORTED_SKUS = {
 class OllamaInferenceAdapter(Inference):
     def __init__(self, url: str) -> None:
         self.url = url
-        # tokenizer = Tokenizer.get_instance()
-        # self.formatter = ChatFormat(tokenizer)
+        tokenizer = Tokenizer.get_instance()
+        self.formatter = ChatFormat(tokenizer)

     @property
     def client(self) -> AsyncClient:
         return AsyncClient(host=self.url)

     async def initialize(self) -> None:
-        print("Ollama init")
-        # try:
-        #     await self.client.ps()
-        # except httpx.ConnectError as e:
-        #     raise RuntimeError(
-        #         "Ollama Server is not running, start it using `ollama serve` in a separate terminal"
-        #     ) from e
+        print("Initializing Ollama, checking connectivity to server...")
+        try:
+            await self.client.ps()
+        except httpx.ConnectError as e:
+            raise RuntimeError(
+                "Ollama Server is not running, start it using `ollama serve` in a separate terminal"
+            ) from e

     async def shutdown(self) -> None:
         pass
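Below is a minimal usage sketch, not part of the commit, showing how the restored connectivity check surfaces to a caller. It assumes OllamaInferenceAdapter is importable from the adapter module shown in the diff and uses Ollama's default local address; both the import context and the URL are assumptions for illustration only.

# Minimal usage sketch (not from this commit). Assumes OllamaInferenceAdapter
# from the diff above is in scope; http://localhost:11434 is Ollama's default
# local address and is an assumption here.
import asyncio

async def main() -> None:
    adapter = OllamaInferenceAdapter(url="http://localhost:11434")
    try:
        # initialize() now awaits self.client.ps() and raises RuntimeError
        # when no Ollama server is reachable at the configured URL.
        await adapter.initialize()
    except RuntimeError as e:
        print(e)
        return
    # ... run inference through the adapter ...
    await adapter.shutdown()

asyncio.run(main())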