Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
(fix) update docstring for get_max_tokens
parent: fa488e29e0
commit: 0c090e3675
1 changed file with 17 additions and 5 deletions
@@ -1608,20 +1608,32 @@ def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
 
 def get_max_tokens(model: str):
     """
-    Get the maximum tokens (context window) for a given model.
+    Get a dict for the maximum tokens (context window),
+    input_cost_per_token, output_cost_per_token for a given model.
 
     Parameters:
     model (str): The name of the model.
 
     Returns:
-        int: The maximum tokens for the given model.
+        dict: A dictionary containing the following information:
+            - max_tokens (int): The maximum number of tokens allowed for the given model.
+            - input_cost_per_token (float): The cost per token for input.
+            - output_cost_per_token (float): The cost per token for output.
+            - litellm_provider (str): The provider of the model (e.g., "openai").
+            - mode (str): The mode of the model (e.g., "chat" or "completion").
 
     Raises:
-    Exception: If the model is not mapped yet.
+        Exception: If the model is not mapped yet.
 
     Example:
-        >>> get_max_tokens("gpt-3.5-turbo")
-        4096
+        >>> get_max_tokens("gpt-4")
+        {
+            "max_tokens": 8192,
+            "input_cost_per_token": 0.00003,
+            "output_cost_per_token": 0.00006,
+            "litellm_provider": "openai",
+            "mode": "chat"
+        }
     """
     try:
         return litellm.model_cost[model]
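With this change, `get_max_tokens` returns the model's full `litellm.model_cost` entry (a dict) rather than a bare `int`, so callers that previously used the return value directly as a context-window size now need to read the `max_tokens` key. A minimal caller-side sketch, assuming `get_max_tokens` is importable from the top-level `litellm` package (the model name and fallback value are illustrative):

```python
import litellm

# After this commit, litellm.get_max_tokens returns the model's full
# litellm.model_cost entry (a dict), not a bare int.
try:
    info = litellm.get_max_tokens("gpt-4")
    context_window = info["max_tokens"]          # e.g. 8192
    input_cost = info["input_cost_per_token"]    # e.g. 0.00003
    output_cost = info["output_cost_per_token"]  # e.g. 0.00006
    print(f"context window: {context_window} tokens, "
          f"cost/token: {input_cost} in / {output_cost} out")
except Exception:
    # Per the docstring, the lookup raises when the model is not mapped
    # in litellm.model_cost yet; this fallback is an illustrative choice.
    context_window = 4096
```

Catching the broad `Exception` mirrors the failure mode the docstring documents for unmapped models.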