fix - testing langfuse service

Ishaan Jaff 2024-04-13 13:44:17 -07:00
parent ffb1393b94
commit 885ae139b8

@@ -8326,12 +8326,16 @@ async def health_services_endpoint(
         )
     if service == "langfuse":
         # run mock completion request
-        return litellm.completion(
+        _ = litellm.completion(
             model="openai/litellm-mock-response-model",
             messages=[{"role": "user", "content": "Hey, how's it going?"}],
             user="litellm:/health/services",
             mock_response="This is a mock response",
         )
+        return {
+            "status": "success",
+            "message": "Mock LLM request made - check langfuse.",
+        }
     if service not in ["slack_budget_alerts", "langfuse"]:
         raise HTTPException(
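
For reference, a minimal sketch of how the updated endpoint might be exercised against a locally running litellm proxy. The base URL, port, and API key below are placeholder assumptions and are not part of this commit; only the returned JSON body comes from the change above.

import requests

# Placeholder values for a locally running litellm proxy (assumptions, not from the diff).
PROXY_BASE_URL = "http://localhost:4000"
MASTER_KEY = "sk-1234"

# Call the health services endpoint for langfuse. With this commit, the endpoint
# fires a mock completion request and returns a small status JSON instead of the
# raw completion object.
resp = requests.get(
    f"{PROXY_BASE_URL}/health/services",
    params={"service": "langfuse"},
    headers={"Authorization": f"Bearer {MASTER_KEY}"},
)
print(resp.json())
# Expected shape: {"status": "success", "message": "Mock LLM request made - check langfuse."}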