forked from phoenix/litellm-mirror
fix - testing langfuse service
This commit is contained in:
parent ffb1393b94
commit 885ae139b8
1 changed file with 5 additions and 1 deletion
@@ -8326,12 +8326,16 @@ async def health_services_endpoint(
             )
         if service == "langfuse":
             # run mock completion request
-            return litellm.completion(
+            _ = litellm.completion(
                 model="openai/litellm-mock-response-model",
                 messages=[{"role": "user", "content": "Hey, how's it going?"}],
                 user="litellm:/health/services",
                 mock_response="This is a mock response",
             )
+            return {
+                "status": "success",
+                "message": "Mock LLM request made - check langfuse.",
+            }

         if service not in ["slack_budget_alerts", "langfuse"]:
             raise HTTPException(
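For anyone reproducing the fix locally: the changed block fires LiteLLM's mock completion (mock_response short-circuits any real provider call) and now returns a plain status dict instead of the raw completion object. A minimal sketch of exercising the updated endpoint follows; the httpx client, base URL, and API key are illustrative assumptions, and the GET route with a `service` query parameter is inferred from the handler shown in the hunk header.

    import httpx

    # Hypothetical local proxy address and key; adjust for your deployment.
    PROXY_BASE_URL = "http://localhost:4000"
    API_KEY = "sk-1234"

    # Ask the proxy to health-check its langfuse integration. After this commit
    # the endpoint responds with a small JSON payload rather than the result of
    # the mock litellm.completion() call.
    resp = httpx.get(
        f"{PROXY_BASE_URL}/health/services",
        params={"service": "langfuse"},
        headers={"Authorization": f"Bearer {API_KEY}"},
    )
    print(resp.json())
    # Expected shape after this change:
    # {"status": "success", "message": "Mock LLM request made - check langfuse."}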