mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
fix(proxy_server.py): run ollama serve when ollama in config.yaml
parent c4f9ef86c9
commit 846a824c47

2 changed files with 10 additions and 0 deletions
@@ -115,6 +115,7 @@ class APIConnectionError(APIConnectionError):  # type: ignore
         self.message = message
         self.llm_provider = llm_provider
         self.model = model
+        self.status_code = 500
         super().__init__(
             message=self.message,
             request=request
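The added status_code attribute gives APIConnectionError a default HTTP status. A minimal sketch of how a FastAPI-based proxy handler could surface it (error_to_response is a hypothetical helper, not part of this commit):

```python
from fastapi.responses import JSONResponse

def error_to_response(exc: Exception) -> JSONResponse:
    # APIConnectionError now carries status_code = 500; fall back for others
    status = getattr(exc, "status_code", 500)
    return JSONResponse(status_code=status, content={"error": str(exc)})
```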
@@ -199,6 +199,11 @@ def prisma_setup(database_url: Optional[str]):
         from prisma import Client
         prisma_client = Client()
 
+def run_ollama_serve():
+    command = ['ollama', 'serve']
+
+    with open(os.devnull, 'w') as devnull:
+        process = subprocess.Popen(command, stdout=devnull, stderr=devnull)
 
 def load_router_config(router: Optional[litellm.Router], config_file_path: str):
     global master_key
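Note that subprocess.Popen returns immediately, so the proxy does not wait for Ollama to finish starting. A hedged sketch of a readiness check against Ollama's default endpoint, http://localhost:11434 (wait_for_ollama is a hypothetical helper, not in this commit):

```python
import time
import requests

def wait_for_ollama(base_url: str = "http://localhost:11434", timeout: float = 30.0) -> bool:
    """Poll the Ollama server until it answers or the timeout elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            # a running `ollama serve` responds 200 on its root path
            if requests.get(base_url, timeout=1).ok:
                return True
        except requests.RequestException:
            pass
        time.sleep(0.5)
    return False
```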
@@ -243,6 +248,10 @@ def load_router_config(router: Optional[litellm.Router], config_file_path: str):
         print(f"\033[32mLiteLLM: Proxy initialized with Config, Set models:\033[0m")
         for model in model_list:
             print(f"\033[32m {model.get('model_name', '')}\033[0m")
+            litellm_model_name = model["litellm_params"]["model"]
+            print(f"litellm_model_name: {litellm_model_name}")
+            if "ollama" in litellm_model_name:
+                run_ollama_serve()
 
     return router, model_list, server_settings
 
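For reference, a minimal config.yaml sketch that would trigger the new branch. The model_list / model_name / litellm_params keys mirror what load_router_config reads above; the exact model value (ollama/llama2) is an illustrative assumption:

```yaml
model_list:
  - model_name: llama2          # display name printed by the proxy
    litellm_params:
      model: ollama/llama2      # contains "ollama", so run_ollama_serve() fires
```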