From c63217e4d4853573b9a18bd34b51303c26de0a81 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 21 Mar 2025 19:20:13 -0700 Subject: [PATCH] _get_litellm_supported_transcription_kwargs --- .../litellm_core_utils/model_param_helper.py | 22 ++++++++----- ...odel_prices_and_context_window_backup.json | 31 +++++++++++++++++++ 2 files changed, 45 insertions(+), 8 deletions(-) diff --git a/litellm/litellm_core_utils/model_param_helper.py b/litellm/litellm_core_utils/model_param_helper.py index 3542ec3a94..5316ab5d84 100644 --- a/litellm/litellm_core_utils/model_param_helper.py +++ b/litellm/litellm_core_utils/model_param_helper.py @@ -1,9 +1,5 @@ from typing import Set -from openai.types.audio.transcription_create_params import ( - TranscriptionCreateParamsNonStreaming, - TranscriptionCreateParamsStreaming, -) from openai.types.chat.completion_create_params import ( CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming, @@ -16,6 +12,7 @@ from openai.types.completion_create_params import ( ) from openai.types.embedding_create_params import EmbeddingCreateParams +from litellm._logging import verbose_logger from litellm.types.rerank import RerankRequest @@ -126,10 +123,19 @@ class ModelParamHelper: This follows the OpenAI API Spec """ - all_transcription_kwargs = set( - TranscriptionCreateParamsNonStreaming.__annotations__.keys() - ).union(set(TranscriptionCreateParamsStreaming.__annotations__.keys())) - return all_transcription_kwargs + try: + from openai.types.audio.transcription_create_params import ( + TranscriptionCreateParamsNonStreaming, + TranscriptionCreateParamsStreaming, + ) + + all_transcription_kwargs = set( + TranscriptionCreateParamsNonStreaming.__annotations__.keys() + ).union(set(TranscriptionCreateParamsStreaming.__annotations__.keys())) + return all_transcription_kwargs + except Exception as e: + verbose_logger.warning("Error getting transcription kwargs %s", str(e)) + return set() @staticmethod def _get_exclude_kwargs() -> Set[str]: diff 
--git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index f2ca9156ad..1d7b8794b5 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1426,6 +1426,25 @@ "supports_vision": false, "supports_prompt_caching": true }, + "azure/gpt-4.5-preview": { + "max_tokens": 16384, + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "input_cost_per_token": 0.000075, + "output_cost_per_token": 0.00015, + "input_cost_per_token_batches": 0.0000375, + "output_cost_per_token_batches": 0.000075, + "cache_read_input_token_cost": 0.0000375, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-4o": { "max_tokens": 16384, "max_input_tokens": 128000, @@ -2091,6 +2110,18 @@ "mode": "chat", "supports_tool_choice": true }, + "azure_ai/mistral-small-2503": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.000001, + "output_cost_per_token": 0.000003, + "litellm_provider": "azure_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_vision": true, + "supports_tool_choice": true + }, "azure_ai/mistral-large-2407": { "max_tokens": 4096, "max_input_tokens": 128000,