LiteLLM Minor Fixes & Improvements (10/18/2024) (#6320)

* fix(converse_transformation.py): handle cross region model name when getting openai param support

Fixes https://github.com/BerriAI/litellm/issues/6291

* LiteLLM Minor Fixes & Improvements (10/17/2024)  (#6293)

* fix(ui_sso.py): fix faulty admin only check

Fixes https://github.com/BerriAI/litellm/issues/6286

* refactor(sso_helper_utils.py): refactor /sso/callback to use helper utils, covered by unit testing

Prevent future regressions

* feat(prompt_factory): support 'ensure_alternating_roles' param

Closes https://github.com/BerriAI/litellm/issues/6257

* fix(proxy/utils.py): add dailytagspend to expected views

* feat(auth_utils.py): support setting regex for clientside auth credentials

Fixes https://github.com/BerriAI/litellm/issues/6203

* build(cookbook): add tutorial for mlflow + langchain + litellm proxy tracing

* feat(argilla.py): add argilla logging integration

Closes https://github.com/BerriAI/litellm/issues/6201

* fix: fix linting errors

* fix: fix ruff error

* test: fix test

* fix: update vertex ai assumption - parts not always guaranteed (#6296)

* docs(configs.md): add argilla env var to docs

* docs(user_keys.md): add regex doc for clientside auth params

* docs(argilla.md): add doc on argilla logging

* docs(argilla.md): add sampling rate to argilla calls

* bump: version 1.49.6 → 1.49.7

* add gpt-4o-audio models to model cost map (#6306)

* (code quality) add ruff check PLR0915 for `too-many-statements`  (#6309)

* ruff add PLR0915

* add noqa for PLR0915

* fix noqa

* add # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* add # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* # noqa: PLR0915

* doc fix Turn on / off caching per Key. (#6297)

* (feat) Support `audio`,  `modalities` params (#6304)

* add audio, modalities param

* add test for gpt audio models

* add get_supported_openai_params for GPT audio models

* add supported params for audio

* test_audio_output_from_model

* bump openai to openai==1.52.0

* bump openai on pyproject

* fix audio test

* fix test mock_chat_response

* handle audio for Message

* fix handling audio for OAI compatible API endpoints

* fix linting

* fix mock dbrx test

* (feat) Support audio param in responses streaming (#6312)

* add audio, modalities param

* add test for gpt audio models

* add get_supported_openai_params for GPT audio models

* add supported params for audio

* test_audio_output_from_model

* bump openai to openai==1.52.0

* bump openai on pyproject

* fix audio test

* fix test mock_chat_response

* handle audio for Message

* fix handling audio for OAI compatible API endpoints

* fix linting

* fix mock dbrx test

* add audio to Delta

* handle model_response.choices.delta.audio

* fix linting

* build(model_prices_and_context_window.json): add gpt-4o-audio audio token cost tracking

* refactor(model_prices_and_context_window.json): refactor 'supports_audio' to be 'supports_audio_input' and 'supports_audio_output'

Allows for flag to be used for openai + gemini models (both support audio input)

* feat(cost_calculation.py): support cost calc for audio model

Closes https://github.com/BerriAI/litellm/issues/6302

* feat(utils.py): expose new `supports_audio_input` and `supports_audio_output` functions

Closes https://github.com/BerriAI/litellm/issues/6303

* feat(handle_jwt.py): support single dict list

* fix(cost_calculator.py): fix linting errors

* fix: fix linting error

* fix(cost_calculator): move to using standard openai usage cached tokens value

* test: fix test

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
This commit is contained in:
Krish Dholakia 2024-10-19 22:23:27 -07:00 committed by GitHub
parent c58d542282
commit 7cc12bd5c6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 496 additions and 121 deletions

View file

@ -37,12 +37,16 @@ from litellm.llms.databricks.cost_calculator import (
from litellm.llms.fireworks_ai.cost_calculator import (
cost_per_token as fireworks_ai_cost_per_token,
)
from litellm.llms.OpenAI.cost_calculation import (
cost_per_second as openai_cost_per_second,
)
from litellm.llms.OpenAI.cost_calculation import cost_per_token as openai_cost_per_token
from litellm.llms.OpenAI.cost_calculation import cost_router as openai_cost_router
from litellm.llms.together_ai.cost_calculator import get_model_params_and_category
from litellm.types.llms.openai import HttpxBinaryResponseContent
from litellm.types.rerank import RerankResponse
from litellm.types.router import SPECIAL_MODEL_INFO_PARAMS
from litellm.types.utils import PassthroughCallTypes, Usage
from litellm.types.utils import CallTypesLiteral, PassthroughCallTypes, Usage
from litellm.utils import (
CallTypes,
CostPerToken,
@ -97,25 +101,10 @@ def cost_per_token( # noqa: PLR0915
custom_cost_per_second: Optional[float] = None,
### NUMBER OF QUERIES ###
number_of_queries: Optional[int] = None,
### USAGE OBJECT ###
usage_object: Optional[Usage] = None, # just read the usage object if provided
### CALL TYPE ###
call_type: Literal[
"embedding",
"aembedding",
"completion",
"acompletion",
"atext_completion",
"text_completion",
"image_generation",
"aimage_generation",
"moderation",
"amoderation",
"atranscription",
"transcription",
"aspeech",
"speech",
"rerank",
"arerank",
] = "completion",
call_type: CallTypesLiteral = "completion",
) -> Tuple[float, float]: # type: ignore
"""
Calculates the cost per token for a given model, prompt tokens, and completion tokens.
@ -139,13 +128,16 @@ def cost_per_token( # noqa: PLR0915
raise Exception("Invalid arg. Model cannot be none.")
## RECONSTRUCT USAGE BLOCK ##
usage_block = Usage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
cache_creation_input_tokens=cache_creation_input_tokens,
cache_read_input_tokens=cache_read_input_tokens,
)
if usage_object is not None:
usage_block = usage_object
else:
usage_block = Usage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
cache_creation_input_tokens=cache_creation_input_tokens,
cache_read_input_tokens=cache_read_input_tokens,
)
## CUSTOM PRICING ##
response_cost = _cost_per_token_custom_pricing_helper(
@ -264,9 +256,13 @@ def cost_per_token( # noqa: PLR0915
elif custom_llm_provider == "anthropic":
return anthropic_cost_per_token(model=model, usage=usage_block)
elif custom_llm_provider == "openai":
return openai_cost_per_token(
model=model, usage=usage_block, response_time_ms=response_time_ms
)
openai_cost_route = openai_cost_router(call_type=CallTypes(call_type))
if openai_cost_route == "cost_per_token":
return openai_cost_per_token(model=model, usage=usage_block)
elif openai_cost_route == "cost_per_second":
return openai_cost_per_second(
model=model, usage=usage_block, response_time_ms=response_time_ms
)
elif custom_llm_provider == "databricks":
return databricks_cost_per_token(model=model, usage=usage_block)
elif custom_llm_provider == "fireworks_ai":
@ -474,6 +470,45 @@ def _select_model_name_for_cost_calc(
return return_model
def _get_usage_object(
completion_response: Any,
) -> Optional[Usage]:
usage_obj: Optional[Usage] = None
if completion_response is not None and isinstance(
completion_response, ModelResponse
):
usage_obj = completion_response.get("usage")
return usage_obj
def _infer_call_type(
call_type: Optional[CallTypesLiteral], completion_response: Any
) -> Optional[CallTypesLiteral]:
if call_type is not None:
return call_type
if completion_response is None:
return None
if isinstance(completion_response, ModelResponse):
return "completion"
elif isinstance(completion_response, EmbeddingResponse):
return "embedding"
elif isinstance(completion_response, TranscriptionResponse):
return "transcription"
elif isinstance(completion_response, HttpxBinaryResponseContent):
return "speech"
elif isinstance(completion_response, RerankResponse):
return "rerank"
elif isinstance(completion_response, ImageResponse):
return "image_generation"
elif isinstance(completion_response, TextCompletionResponse):
return "text_completion"
return call_type
def completion_cost( # noqa: PLR0915
completion_response=None,
model: Optional[str] = None,
@ -481,24 +516,7 @@ def completion_cost( # noqa: PLR0915
messages: List = [],
completion="",
total_time: Optional[float] = 0.0, # used for replicate, sagemaker
call_type: Literal[
"embedding",
"aembedding",
"completion",
"acompletion",
"atext_completion",
"text_completion",
"image_generation",
"aimage_generation",
"moderation",
"amoderation",
"atranscription",
"transcription",
"aspeech",
"speech",
"rerank",
"arerank",
] = "completion",
call_type: Optional[CallTypesLiteral] = None,
### REGION ###
custom_llm_provider=None,
region_name=None, # used for bedrock pricing
@ -539,6 +557,7 @@ def completion_cost( # noqa: PLR0915
- For un-mapped Replicate models, the cost is calculated based on the total time used for the request.
"""
try:
call_type = _infer_call_type(call_type, completion_response) or "completion"
if (
(call_type == "aimage_generation" or call_type == "image_generation")
and model is not None
@ -554,6 +573,9 @@ def completion_cost( # noqa: PLR0915
completion_characters: Optional[int] = None
cache_creation_input_tokens: Optional[int] = None
cache_read_input_tokens: Optional[int] = None
cost_per_token_usage_object: Optional[litellm.Usage] = _get_usage_object(
completion_response=completion_response
)
if completion_response is not None and (
isinstance(completion_response, BaseModel)
or isinstance(completion_response, dict)
@ -760,6 +782,7 @@ def completion_cost( # noqa: PLR0915
completion_characters=completion_characters,
cache_creation_input_tokens=cache_creation_input_tokens,
cache_read_input_tokens=cache_read_input_tokens,
usage_object=cost_per_token_usage_object,
call_type=call_type,
)
_final_cost = prompt_tokens_cost_usd_dollar + completion_tokens_cost_usd_dollar