Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
LiteLLM Minor Fixes & Improvements (09/23/2024) (#5842)

* feat(auth_utils.py): enable admin to allow client-side credentials to be passed. Makes it easier for devs to experiment with finetuned fireworks ai models
* feat(router.py): allow setting configurable_clientside_auth_params for a model. Closes https://github.com/BerriAI/litellm/issues/5843
* build(model_prices_and_context_window.json): fix anthropic claude-3-5-sonnet max output token limit. Fixes https://github.com/BerriAI/litellm/issues/5850
* fix(azure_ai/): support content list for azure ai. Fixes https://github.com/BerriAI/litellm/issues/4237
* fix(litellm_logging.py): always set saved_cache_cost (0 by default)
* fix(fireworks_ai/cost_calculator.py): add fireworks ai default pricing; handles calling 405b+ size models
* fix(slack_alerting.py): fix error alerting for failed spend tracking. Fixes regression with slack alerting error monitoring
* fix(vertex_and_google_ai_studio_gemini.py): handle gemini "no candidates in streaming chunk" error
* docs(bedrock.md): add llama3-1 models
* test: fix tests
* fix(azure_ai/chat): fix transformation for azure ai calls
* feat(azure_ai/embed): add azure ai embeddings support. Closes https://github.com/BerriAI/litellm/issues/5861
* fix(azure_ai/embed): enable async embedding
* feat(azure_ai/embed): support azure ai multimodal embeddings
* fix(azure_ai/embed): support async multi modal embeddings
* feat(together_ai/embed): support together ai embedding calls
* feat(rerank/main.py): log source documents for rerank endpoints to langfuse; improves rerank endpoint logging
* fix(langfuse.py): support logging `/audio/speech` input to langfuse
* test(test_embedding.py): fix test
* test(test_completion_cost.py): fix helper util
79 lines · 2.6 KiB · Python
"""
|
|
Handles calculating cost for together ai models
|
|
"""
|
|
|
|
import re
|
|
|
|
from litellm.types.utils import CallTypes
|
|
|
|
|
|
# Extract the number of billion parameters from the model name
|
|
# only used for together_computer LLMs
|
|
def get_model_params_and_category(model_name, call_type: CallTypes) -> str:
|
|
"""
|
|
Helper function for calculating together ai pricing.
|
|
|
|
Returns
|
|
- str - model pricing category if mapped else received model name
|
|
"""
|
|
if call_type == CallTypes.embedding or call_type == CallTypes.aembedding:
|
|
return get_model_params_and_category_embeddings(model_name=model_name)
|
|
model_name = model_name.lower()
|
|
re_params_match = re.search(
|
|
r"(\d+b)", model_name
|
|
) # catch all decimals like 3b, 70b, etc
|
|
category = None
|
|
if re_params_match is not None:
|
|
params_match = str(re_params_match.group(1))
|
|
params_match = params_match.replace("b", "")
|
|
if params_match is not None:
|
|
params_billion = float(params_match)
|
|
else:
|
|
return model_name
|
|
# Determine the category based on the number of parameters
|
|
if params_billion <= 4.0:
|
|
category = "together-ai-up-to-4b"
|
|
elif params_billion <= 8.0:
|
|
category = "together-ai-4.1b-8b"
|
|
elif params_billion <= 21.0:
|
|
category = "together-ai-8.1b-21b"
|
|
elif params_billion <= 41.0:
|
|
category = "together-ai-21.1b-41b"
|
|
elif params_billion <= 80.0:
|
|
category = "together-ai-41.1b-80b"
|
|
elif params_billion <= 110.0:
|
|
category = "together-ai-81.1b-110b"
|
|
if category is not None:
|
|
return category
|
|
|
|
return model_name
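# Illustrative usage sketch for the helper above: the parameter count parsed
# from the model name decides the pricing bucket. The model IDs below are
# examples only and may not match Together AI's current catalog; the
# call_type used here assumes a standard chat/completion call.
def _example_llm_categories() -> dict:
    return {
        # "70b" in the name -> 70.0 <= 80.0 -> "together-ai-41.1b-80b"
        "llama-3-70b": get_model_params_and_category(
            "meta-llama/Llama-3-70b-chat-hf", call_type=CallTypes.completion
        ),
        # "405b" exceeds the largest mapped bucket (110b), so the lowercased
        # model name is returned as-is
        "llama-3.1-405b": get_model_params_and_category(
            "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
            call_type=CallTypes.completion,
        ),
    }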
def get_model_params_and_category_embeddings(model_name) -> str:
    """
    Helper function for calculating together ai embedding pricing.

    Returns
    - str - model pricing category if mapped else received model name
    """
    model_name = model_name.lower()
    re_params_match = re.search(
        r"(\d+m)", model_name
    )  # catch parameter counts like 100m, 200m, etc.
    category = None
    if re_params_match is not None:
        params_match = str(re_params_match.group(1))
        params_match = params_match.replace("m", "")
        if params_match is not None:
            params_million = float(params_match)
        else:
            return model_name
        # Determine the category based on the number of parameters
        if params_million <= 150:
            category = "together-ai-embedding-up-to-150m"
        elif params_million <= 350:
            category = "together-ai-embedding-151m-to-350m"
        if category is not None:
            return category

    return model_name
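# Illustrative usage sketch: embedding models are bucketed by the "<N>m"
# parameter count in their name, and embedding call types are dispatched to
# the embeddings helper. The model IDs below are examples only and may not
# match Together AI's current catalog.
if __name__ == "__main__":
    # "80m" -> 80 <= 150 -> "together-ai-embedding-up-to-150m"
    print(
        get_model_params_and_category(
            "togethercomputer/m2-bert-80M-8k-retrieval",
            call_type=CallTypes.embedding,
        )
    )
    # no "<N>m" pattern in the name -> the lowercased name is returned as-is
    print(
        get_model_params_and_category(
            "BAAI/bge-large-en-v1.5", call_type=CallTypes.embedding
        )
    )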