Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fix(utils.py): update get_model_info docstring
Fixes https://github.com/BerriAI/litellm/issues/4711
parent ff1bcdd3b5
commit 9cc2daeec9
2 changed files with 48 additions and 8 deletions
@@ -4665,13 +4665,40 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> ModelInfo:

    Returns:
        dict: A dictionary containing the following information:
            - max_tokens (int): The maximum number of tokens allowed for the given model.
            - input_cost_per_token (float): The cost per token for input.
            - output_cost_per_token (float): The cost per token for output.
            - litellm_provider (str): The provider of the model (e.g., "openai").
            - mode (str): The mode of the model (e.g., "chat" or "completion").
            - supported_openai_params (List[str]): A list of supported OpenAI parameters for the model.

            max_tokens: Required[Optional[int]]
            max_input_tokens: Required[Optional[int]]
            max_output_tokens: Required[Optional[int]]
            input_cost_per_token: Required[float]
            input_cost_per_character: Optional[float]  # only for vertex ai models
            input_cost_per_token_above_128k_tokens: Optional[float]  # only for vertex ai models
            input_cost_per_character_above_128k_tokens: Optional[
                float
            ]  # only for vertex ai models
            input_cost_per_image: Optional[float]  # only for vertex ai models
            input_cost_per_audio_per_second: Optional[float]  # only for vertex ai models
            input_cost_per_video_per_second: Optional[float]  # only for vertex ai models
            output_cost_per_token: Required[float]
            output_cost_per_character: Optional[float]  # only for vertex ai models
            output_cost_per_token_above_128k_tokens: Optional[
                float
            ]  # only for vertex ai models
            output_cost_per_character_above_128k_tokens: Optional[
                float
            ]  # only for vertex ai models
            output_cost_per_image: Optional[float]
            output_vector_size: Optional[int]
            output_cost_per_video_per_second: Optional[float]  # only for vertex ai models
            output_cost_per_audio_per_second: Optional[float]  # only for vertex ai models
            litellm_provider: Required[str]
            mode: Required[
                Literal[
                    "completion", "embedding", "image_generation", "chat", "audio_transcription"
                ]
            ]
            supported_openai_params: Required[Optional[List[str]]]
            supports_system_messages: Optional[bool]
            supports_response_schema: Optional[bool]
            supports_vision: Optional[bool]

    Raises:
        Exception: If the model is not mapped yet.
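For context (not part of the commit), a minimal usage sketch of the function whose docstring this diff updates. It assumes litellm is installed and that get_model_info is exported at the package level; the model name "gpt-3.5-turbo" is an arbitrary example, and the keys printed below are taken from the ModelInfo fields listed in the diff.

import litellm

try:
    # The returned dict carries the fields documented in the updated docstring (ModelInfo).
    info = litellm.get_model_info(model="gpt-3.5-turbo")
    print(info["max_tokens"])               # Required[Optional[int]]
    print(info["input_cost_per_token"])     # Required[float]
    print(info["output_cost_per_token"])    # Required[float]
    print(info["litellm_provider"])         # e.g. "openai"
    print(info["mode"])                     # e.g. "chat"
    print(info["supported_openai_params"])  # Required[Optional[List[str]]]
except Exception as e:
    # Per the docstring above, an Exception is raised if the model is not mapped yet.
    print(f"model not mapped: {e}")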