Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
Return output_vector_size in get_model_info
This commit is contained in:
parent 52bcf37079
commit 17482ded74
2 changed files with 4 additions and 0 deletions
@@ -4399,6 +4399,7 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> Mod
     output_cost_per_token_above_128k_tokens=_model_info.get(
         "output_cost_per_token_above_128k_tokens", None
     ),
+    output_vector_size=_model_info.get("output_vector_size", None),
     litellm_provider=_model_info.get(
         "litellm_provider", custom_llm_provider
     ),
@@ -4433,6 +4434,7 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> Mod
     output_cost_per_token_above_128k_tokens=_model_info.get(
         "output_cost_per_token_above_128k_tokens", None
     ),
+    output_vector_size=_model_info.get("output_vector_size", None),
     litellm_provider=_model_info.get(
         "litellm_provider", custom_llm_provider
     ),
@@ -4467,6 +4469,7 @@ def get_model_info(model: str, custom_llm_provider: Optional[str] = None) -> Mod
     output_cost_per_token_above_128k_tokens=_model_info.get(
         "output_cost_per_token_above_128k_tokens", None
     ),
+    output_vector_size=_model_info.get("output_vector_size", None),
     litellm_provider=_model_info.get(
         "litellm_provider", custom_llm_provider
     ),
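
For context, a minimal sketch of how a caller might read the field this commit adds; the model name used below is only an illustrative example, and output_vector_size falls back to None for models whose cost-map entry does not define it:

    import litellm

    # Look up metadata for an embedding model. "text-embedding-ada-002" is just
    # an example; any model present in litellm's model cost map would work.
    info = litellm.get_model_info("text-embedding-ada-002")

    # output_vector_size is the field returned as of this commit; it is None
    # when the underlying _model_info entry does not set it.
    print(info.get("output_vector_size"))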