mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
feat(llama_guard.py): add llama guard support for content moderation + new async_moderation_hook
endpoint
This commit is contained in:
parent
5e7dda4f88
commit
2a4a6995ac
12 changed files with 163 additions and 132 deletions
16
enterprise/utils.py
Normal file
16
enterprise/utils.py
Normal file
|
@@ -0,0 +1,16 @@
|
|||
# Enterprise Proxy Util Endpoints
|
||||
|
||||
|
||||
async def get_spend_by_tags(start_date=None, end_date=None, prisma_client=None):
    """Return aggregate spend grouped by individual request tag.

    Runs a raw SQL query against the ``LiteLLM_SpendLogs`` table, expanding
    the JSONB ``request_tags`` array so that each tag gets its own row with
    a log count and a total spend.

    Args:
        start_date: Currently UNUSED — the query does not filter by date.
            TODO: add a date-range WHERE clause (confirm timestamp column).
        end_date: Currently UNUSED — see ``start_date``.
        prisma_client: Connected Prisma client; required.

    Returns:
        Raw result of ``prisma_client.db.query_raw`` — presumably a list of
        row dicts with keys ``individual_request_tag``, ``log_count`` and
        ``total_spend`` (shape determined by the Prisma driver).

    Raises:
        ValueError: If ``prisma_client`` is None.
    """
    if prisma_client is None:
        # Fail fast with a clear message instead of the opaque
        # AttributeError ("'NoneType' object has no attribute 'db'")
        # the original code would raise when called with the default.
        raise ValueError("prisma_client is required to query spend by tags")

    response = await prisma_client.db.query_raw(
        """
        SELECT
        jsonb_array_elements_text(request_tags) AS individual_request_tag,
        COUNT(*) AS log_count,
        SUM(spend) AS total_spend
        FROM "LiteLLM_SpendLogs"
        GROUP BY individual_request_tag;
        """
    )

    return response
|
Loading…
Add table
Add a link
Reference in a new issue