forked from phoenix/litellm-mirror
Fail gracefully if ollama is already being served
parent 824136667f
commit 01fad94485
1 changed file with 9 additions and 4 deletions
@@ -227,10 +227,15 @@ def celery_setup(use_queue: bool):
         celery_app_conn = celery_app
 
 def run_ollama_serve():
-    command = ['ollama', 'serve']
-
-    with open(os.devnull, 'w') as devnull:
-        process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
+    try:
+        command = ['ollama', 'serve']
+
+        with open(os.devnull, 'w') as devnull:
+            process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
+    except Exception as e:
+        print(f"""
+            LiteLLM Warning: proxy started with `ollama` model\n`ollama serve` failed with Exception{e}. \nEnsure you run `ollama serve`
+        """)
 
 def load_router_config(router: Optional[litellm.Router], config_file_path: str):
     global master_key
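As patched, the try/except catches failures raised by subprocess.Popen itself (for example, a missing `ollama` binary); a port conflict from an already-running server surfaces inside the child process, whose output the devnull redirect discards. Below is a minimal sketch, not part of this commit, of how a proxy could instead detect a running server up front by probing ollama's default port (11434). The `ollama_already_running` helper and its messages are hypothetical illustrations.

import socket
import subprocess

# Hypothetical helper (not part of this commit): probe ollama's default
# port (11434) to check whether a server is already listening.
def ollama_already_running(host: str = "127.0.0.1", port: int = 11434) -> bool:
    try:
        with socket.create_connection((host, port), timeout=0.5):
            return True
    except OSError:
        return False

def run_ollama_serve():
    if ollama_already_running():
        print("LiteLLM: detected a running `ollama serve`, skipping spawn")
        return
    try:
        # subprocess.DEVNULL replaces the manual os.devnull handle used in
        # the diff above; the effect is the same, output is discarded.
        subprocess.Popen(
            ["ollama", "serve"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except Exception as e:
        print(f"LiteLLM Warning: proxy started with `ollama` model. "
              f"`ollama serve` failed with {e}. Ensure you run `ollama serve`")

Note that 11434 is only ollama's default; a deployment that sets OLLAMA_HOST to a different address would need the probe pointed there.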