From 885ae139b87b7f3e29ea72328a3692619b8c7769 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Sat, 13 Apr 2024 13:44:17 -0700
Subject: [PATCH] fix - testing langfuse service

---
 litellm/proxy/proxy_server.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 8a6dbe034..3eaa99760 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -8326,12 +8326,16 @@ async def health_services_endpoint(
             )
         if service == "langfuse":
             # run mock completion request
-            return litellm.completion(
+            _ = litellm.completion(
                 model="openai/litellm-mock-response-model",
                 messages=[{"role": "user", "content": "Hey, how's it going?"}],
                 user="litellm:/health/services",
                 mock_response="This is a mock response",
             )
+            return {
+                "status": "success",
+                "message": "Mock LLM request made - check langfuse.",
+            }
 
         if service not in ["slack_budget_alerts", "langfuse"]:
             raise HTTPException(
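
A minimal sketch of exercising the changed endpoint, not part of the patch: it assumes the handler is exposed as GET /health/services (suggested by the "litellm:/health/services" user string above) on a proxy running at localhost:4000, and "sk-1234" stands in for a real master key.

    import requests

    # Hit the langfuse health-check service on a locally running LiteLLM proxy.
    resp = requests.get(
        "http://localhost:4000/health/services",   # assumed route and port
        params={"service": "langfuse"},
        headers={"Authorization": "Bearer sk-1234"},  # illustrative key
        timeout=30,
    )
    resp.raise_for_status()

    # With this patch the endpoint returns a small status payload instead of the
    # raw mock completion object:
    # {"status": "success", "message": "Mock LLM request made - check langfuse."}
    print(resp.json())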