Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 02:34:29 +00:00
add supports reasoning to model group info
This commit is contained in:
parent 05540713fd
commit 02c265181c
2 changed files with 17 additions and 13 deletions
The first changed file updates the router type definitions:
@@ -96,16 +96,18 @@ class ModelInfo(BaseModel):
     id: Optional[
         str
     ]  # Allow id to be optional on input, but it will always be present as a str in the model instance
-    db_model: bool = False  # used for proxy - to separate models which are stored in the db vs. config.
+    db_model: bool = (
+        False  # used for proxy - to separate models which are stored in the db vs. config.
+    )
     updated_at: Optional[datetime.datetime] = None
     updated_by: Optional[str] = None

     created_at: Optional[datetime.datetime] = None
     created_by: Optional[str] = None

-    base_model: Optional[
-        str
-    ] = None  # specify if the base model is azure/gpt-3.5-turbo etc for accurate cost tracking
+    base_model: Optional[str] = (
+        None  # specify if the base model is azure/gpt-3.5-turbo etc for accurate cost tracking
+    )
     tier: Optional[Literal["free", "paid"]] = None

     """
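For context, a minimal sketch of the behavior these field comments describe, assuming ModelInfo is importable from litellm.types.router (hypothetical values):

from litellm.types.router import ModelInfo

config_model = ModelInfo(id=None, db_model=False)        # model defined in config.yaml
stored_model = ModelInfo(id="model-123", db_model=True)  # model stored in the proxy db

# Per the comment on `id` above: optional on input, always a str on the instance.
assert isinstance(config_model.id, str)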
@@ -178,12 +180,12 @@ class GenericLiteLLMParams(CredentialLiteLLMParams, CustomPricingLiteLLMParams):
     custom_llm_provider: Optional[str] = None
     tpm: Optional[int] = None
     rpm: Optional[int] = None
-    timeout: Optional[
-        Union[float, str, httpx.Timeout]
-    ] = None  # if str, pass in as os.environ/
-    stream_timeout: Optional[
-        Union[float, str]
-    ] = None  # timeout when making stream=True calls, if str, pass in as os.environ/
+    timeout: Optional[Union[float, str, httpx.Timeout]] = (
+        None  # if str, pass in as os.environ/
+    )
+    stream_timeout: Optional[Union[float, str]] = (
+        None  # timeout when making stream=True calls, if str, pass in as os.environ/
+    )
     max_retries: Optional[int] = None
     organization: Optional[str] = None  # for openai orgs
     configurable_clientside_auth_params: CONFIGURABLE_CLIENTSIDE_AUTH_PARAMS = None
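A minimal sketch of what the timeout comments describe, assuming GenericLiteLLMParams is importable from litellm.types.router (hypothetical values):

from litellm.types.router import GenericLiteLLMParams

params = GenericLiteLLMParams(
    timeout="os.environ/LITELLM_TIMEOUT",  # str form: resolved from the named env var
    stream_timeout=30.0,                   # float form is accepted directly
)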
@@ -253,9 +255,9 @@ class GenericLiteLLMParams(CredentialLiteLLMParams, CustomPricingLiteLLMParams):
         if max_retries is not None and isinstance(max_retries, str):
             max_retries = int(max_retries)  # cast to int
         # We need to keep max_retries in args since it's a parameter of GenericLiteLLMParams
-        args[
-            "max_retries"
-        ] = max_retries  # Put max_retries back in args after popping it
+        args["max_retries"] = (
+            max_retries  # Put max_retries back in args after popping it
+        )
         super().__init__(**args, **params)

     def __contains__(self, key):
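The cast above means a str max_retries (e.g. read from config) still ends up as an int on the instance; a minimal sketch under the same import assumption:

from litellm.types.router import GenericLiteLLMParams

p = GenericLiteLLMParams(max_retries="3")  # str input, e.g. from config
assert p.max_retries == 3                  # cast to int, then put back in args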
@@ -562,6 +564,7 @@ class ModelGroupInfo(BaseModel):
     supports_parallel_function_calling: bool = Field(default=False)
     supports_vision: bool = Field(default=False)
     supports_web_search: bool = Field(default=False)
+    supports_reasoning: bool = Field(default=False)
     supports_function_calling: bool = Field(default=False)
     supported_openai_params: Optional[List[str]] = Field(default=[])
     configurable_clientside_auth_params: CONFIGURABLE_CLIENTSIDE_AUTH_PARAMS = None
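With the new field, reasoning support can be reported per model group; a minimal sketch assuming model_group and providers are the required fields (hypothetical names):

from litellm.types.router import ModelGroupInfo

info = ModelGroupInfo(
    model_group="deepseek-r1",  # hypothetical model group name
    providers=["deepseek"],     # hypothetical provider list
    supports_reasoning=True,    # the field added by this commit
)
print(info.supports_reasoning)  # True; defaults to False when omitted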
The second changed file extends the JSON schema used to validate model_prices_and_context_window.json so the new key is accepted:
@@ -510,6 +510,7 @@ def test_aaamodel_prices_and_context_window_json_is_valid():
             "supports_video_input": {"type": "boolean"},
             "supports_vision": {"type": "boolean"},
             "supports_web_search": {"type": "boolean"},
+            "supports_reasoning": {"type": "boolean"},
             "tool_use_system_prompt_tokens": {"type": "number"},
             "tpm": {"type": "number"},
             "supported_endpoints": {
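A minimal sketch of the kind of check this test performs, assuming the jsonschema package (the real schema is much larger):

import jsonschema

schema = {
    "type": "object",
    "properties": {
        "supports_web_search": {"type": "boolean"},
        "supports_reasoning": {"type": "boolean"},  # the newly allowed key
    },
}

entry = {"supports_reasoning": True}  # fragment of a hypothetical model entry
jsonschema.validate(entry, schema)    # raises ValidationError on a type mismatch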