Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
Litellm dev 12 07 2024 (#7086)
* fix(main.py): support passing max retries to azure/openai embedding integrations
  Fixes https://github.com/BerriAI/litellm/issues/7003
* feat(team_endpoints.py): allow updating team model aliases
  Closes https://github.com/BerriAI/litellm/issues/6956
* feat(router.py): allow specifying model id as fallback - skips any cooldown check
  Allows a default model to be checked if all models in cooldown
  s/o @micahjsmith
* docs(reliability.md): add fallback to specific model to docs
* fix(utils.py): new 'is_prompt_caching_valid_prompt' helper util
  Allows user to identify if messages/tools have prompt caching
  Related issue: https://github.com/BerriAI/litellm/issues/6784
* feat(router.py): store model id for prompt caching valid prompt
  Allows routing to that model id on subsequent requests
* fix(router.py): only cache if prompt is valid prompt caching prompt
  Prevents storing unnecessary items in cache
* feat(router.py): support routing prompt caching enabled models to previous deployments
  Closes https://github.com/BerriAI/litellm/issues/6784
* test: fix linting errors
* feat(databricks/): convert basemodel to dict and exclude none values
  Allows passing pydantic message to databricks
* fix(utils.py): ensure all chat completion messages are dict
* (feat) Track `custom_llm_provider` in LiteLLMSpendLogs (#7081)
  * add custom_llm_provider to SpendLogsPayload
  * add custom_llm_provider to SpendLogs
  * add custom llm provider to SpendLogs payload
  * test_spend_logs_payload
* Add MLflow to the side bar (#7031)
  Signed-off-by: B-Step62 <yuki.watanabe@databricks.com>
* (bug fix) SpendLogs update DB catch all possible DB errors for retrying (#7082)
  * catch DB_CONNECTION_ERROR_TYPES
  * fix DB retry mechanism for SpendLog updates
  * use DB_CONNECTION_ERROR_TYPES in auth checks
  * fix exp back off for writing SpendLogs
  * use _raise_failed_update_spend_exception to ensure errors print as NON blocking
  * test_update_spend_logs_multiple_batches_with_failure
* (Feat) Add StructuredOutputs support for Fireworks.AI (#7085)
  * fix model cost map fireworks ai "supports_response_schema": true,
  * fix supports_response_schema
  * fix map openai params fireworks ai
  * test_map_response_format
  * test_map_response_format
* added deepinfra/Meta-Llama-3.1-405B-Instruct (#7084)
* bump: version 1.53.9 → 1.54.0
* fix deepinfra
* litellm db fixes LiteLLM_UserTable (#7089)
* ci/cd queue new release
* fix llama-3.3-70b-versatile
* refactor - use consistent file naming convention `AI21/` -> `ai21` (#7090)
  * fix refactor - use consistent file naming convention
  * ci/cd run again
  * fix naming structure
* fix use consistent naming (#7092)

---------

Signed-off-by: B-Step62 <yuki.watanabe@databricks.com>
Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Yuki Watanabe <31463517+B-Step62@users.noreply.github.com>
Co-authored-by: ali sayyah <ali.sayyah2@gmail.com>
This commit is contained in:
parent 664d82ca9e
commit 70c4e1b4d2
24 changed files with 840 additions and 193 deletions
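The first item in the commit message above forwards max_retries to the Azure/OpenAI embedding integrations (https://github.com/BerriAI/litellm/issues/7003). Below is a hedged usage sketch of that fix, not code from this diff: the deployment name, input text, and retry count are illustrative placeholders, and Azure credentials are assumed to come from the usual AZURE_API_KEY / AZURE_API_BASE / AZURE_API_VERSION environment variables.

# Hedged sketch of the max_retries behavior described in this commit's first item.
# The deployment name and retry count are placeholders; Azure credentials are
# assumed to be configured via environment variables.
import litellm

response = litellm.embedding(
    model="azure/text-embedding-ada-002",  # hypothetical Azure deployment name
    input=["hello from litellm"],
    max_retries=3,  # per the fix, now passed through to the embedding client
)
print(response)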
@@ -1207,173 +1207,6 @@ class PrismaClient:
        except Exception:
            raise

        # try:
        #     # Try to select one row from the view
        #     await self.db.query_raw(
        #         """SELECT 1 FROM "LiteLLM_VerificationTokenView" LIMIT 1"""
        #     )
        #     print("LiteLLM_VerificationTokenView Exists!")  # noqa
        # except Exception as e:
        # If an error occurs, the view does not exist, so create it

        # try:
        #     await self.db.query_raw("""SELECT 1 FROM "MonthlyGlobalSpend" LIMIT 1""")
        #     print("MonthlyGlobalSpend Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE OR REPLACE VIEW "MonthlyGlobalSpend" AS
        #     SELECT
        #     DATE("startTime") AS date,
        #     SUM("spend") AS spend
        #     FROM
        #     "LiteLLM_SpendLogs"
        #     WHERE
        #     "startTime" >= (CURRENT_DATE - INTERVAL '30 days')
        #     GROUP BY
        #     DATE("startTime");
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("MonthlyGlobalSpend Created!")  # noqa

        # try:
        #     await self.db.query_raw("""SELECT 1 FROM "Last30dKeysBySpend" LIMIT 1""")
        #     print("Last30dKeysBySpend Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE OR REPLACE VIEW "Last30dKeysBySpend" AS
        #     SELECT
        #     L."api_key",
        #     V."key_alias",
        #     V."key_name",
        #     SUM(L."spend") AS total_spend
        #     FROM
        #     "LiteLLM_SpendLogs" L
        #     LEFT JOIN
        #     "LiteLLM_VerificationToken" V
        #     ON
        #     L."api_key" = V."token"
        #     WHERE
        #     L."startTime" >= (CURRENT_DATE - INTERVAL '30 days')
        #     GROUP BY
        #     L."api_key", V."key_alias", V."key_name"
        #     ORDER BY
        #     total_spend DESC;
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("Last30dKeysBySpend Created!")  # noqa

        # try:
        #     await self.db.query_raw("""SELECT 1 FROM "Last30dModelsBySpend" LIMIT 1""")
        #     print("Last30dModelsBySpend Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE OR REPLACE VIEW "Last30dModelsBySpend" AS
        #     SELECT
        #     "model",
        #     SUM("spend") AS total_spend
        #     FROM
        #     "LiteLLM_SpendLogs"
        #     WHERE
        #     "startTime" >= (CURRENT_DATE - INTERVAL '30 days')
        #     AND "model" != ''
        #     GROUP BY
        #     "model"
        #     ORDER BY
        #     total_spend DESC;
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("Last30dModelsBySpend Created!")  # noqa
        # try:
        #     await self.db.query_raw(
        #         """SELECT 1 FROM "MonthlyGlobalSpendPerKey" LIMIT 1"""
        #     )
        #     print("MonthlyGlobalSpendPerKey Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE OR REPLACE VIEW "MonthlyGlobalSpendPerKey" AS
        #     SELECT
        #     DATE("startTime") AS date,
        #     SUM("spend") AS spend,
        #     api_key as api_key
        #     FROM
        #     "LiteLLM_SpendLogs"
        #     WHERE
        #     "startTime" >= (CURRENT_DATE - INTERVAL '30 days')
        #     GROUP BY
        #     DATE("startTime"),
        #     api_key;
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("MonthlyGlobalSpendPerKey Created!")  # noqa
        # try:
        #     await self.db.query_raw(
        #         """SELECT 1 FROM "MonthlyGlobalSpendPerUserPerKey" LIMIT 1"""
        #     )
        #     print("MonthlyGlobalSpendPerUserPerKey Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE OR REPLACE VIEW "MonthlyGlobalSpendPerUserPerKey" AS
        #     SELECT
        #     DATE("startTime") AS date,
        #     SUM("spend") AS spend,
        #     api_key as api_key,
        #     "user" as "user"
        #     FROM
        #     "LiteLLM_SpendLogs"
        #     WHERE
        #     "startTime" >= (CURRENT_DATE - INTERVAL '20 days')
        #     GROUP BY
        #     DATE("startTime"),
        #     "user",
        #     api_key;
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("MonthlyGlobalSpendPerUserPerKey Created!")  # noqa

        # try:
        #     await self.db.query_raw("""SELECT 1 FROM "DailyTagSpend" LIMIT 1""")
        #     print("DailyTagSpend Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE OR REPLACE VIEW DailyTagSpend AS
        #     SELECT
        #     jsonb_array_elements_text(request_tags) AS individual_request_tag,
        #     DATE(s."startTime") AS spend_date,
        #     COUNT(*) AS log_count,
        #     SUM(spend) AS total_spend
        #     FROM "LiteLLM_SpendLogs" s
        #     GROUP BY individual_request_tag, DATE(s."startTime");
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("DailyTagSpend Created!")  # noqa

        # try:
        #     await self.db.query_raw(
        #         """SELECT 1 FROM "Last30dTopEndUsersSpend" LIMIT 1"""
        #     )
        #     print("Last30dTopEndUsersSpend Exists!")  # noqa
        # except Exception as e:
        #     sql_query = """
        #     CREATE VIEW "Last30dTopEndUsersSpend" AS
        #     SELECT end_user, COUNT(*) AS total_events, SUM(spend) AS total_spend
        #     FROM "LiteLLM_SpendLogs"
        #     WHERE end_user <> '' AND end_user <> user
        #     AND "startTime" >= CURRENT_DATE - INTERVAL '30 days'
        #     GROUP BY end_user
        #     ORDER BY total_spend DESC
        #     LIMIT 100;
        #     """
        #     await self.db.execute_raw(query=sql_query)

        #     print("Last30dTopEndUsersSpend Created!")  # noqa

        return

    @log_db_metrics
@@ -1784,6 +1617,14 @@ class PrismaClient:
            )
            raise e

    def jsonify_team_object(self, db_data: dict):
        db_data = self.jsonify_object(data=db_data)
        if db_data.get("members_with_roles", None) is not None and isinstance(
            db_data["members_with_roles"], list
        ):
            db_data["members_with_roles"] = json.dumps(db_data["members_with_roles"])
        return db_data

    # Define a retrying strategy with exponential backoff
    @backoff.on_exception(
        backoff.expo,
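The jsonify_team_object helper shown in the hunk above serializes a team row's members_with_roles list into a JSON string before it is written to the database. The following is a hedged, self-contained sketch of that behavior, not the library's actual PrismaClient: jsonify_object is replaced with a minimal stand-in, and the team data is made up for illustration.

# Hedged sketch of what jsonify_team_object does before a team row is written.
# PrismaClientSketch and its jsonify_object stand-in are hypothetical; the team
# data below is invented for illustration.
import json


class PrismaClientSketch:
    def jsonify_object(self, data: dict) -> dict:
        # Stand-in: dump any dict values to JSON strings so they fit
        # string/JSON columns.
        return {
            k: json.dumps(v) if isinstance(v, dict) else v for k, v in data.items()
        }

    def jsonify_team_object(self, db_data: dict):
        # Mirrors the method added in the hunk above.
        db_data = self.jsonify_object(data=db_data)
        if db_data.get("members_with_roles", None) is not None and isinstance(
            db_data["members_with_roles"], list
        ):
            db_data["members_with_roles"] = json.dumps(db_data["members_with_roles"])
        return db_data


team_row = {
    "team_id": "team-123",  # hypothetical values
    "members_with_roles": [{"user_id": "u1", "role": "admin"}],
}
print(PrismaClientSketch().jsonify_team_object(team_row))
# members_with_roles is now a JSON string, ready for the DB update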
@@ -2348,7 +2189,6 @@ def get_instance_fn(value: str, config_file_path: Optional[str] = None) -> Any:
    module_name = value
    instance_name = None
    try:
        print_verbose(f"value: {value}")
        # Split the path by dots to separate module from instance
        parts = value.split(".")