forked from phoenix/litellm-mirror

commit 0721e95b0b
Merge branch 'main' into feature/return-output-vector-size-in-modelinfo

319 changed files with 19767 additions and 3296 deletions
@@ -44,16 +44,26 @@ class ModelInfo(TypedDict, total=False):
     max_input_tokens: Required[Optional[int]]
     max_output_tokens: Required[Optional[int]]
     input_cost_per_token: Required[float]
-    input_cost_per_token_above_128k_tokens: Optional[float]
-    input_cost_per_image: Optional[float]
-    input_cost_per_audio_per_second: Optional[float]
-    input_cost_per_video_per_second: Optional[float]
+    input_cost_per_character: Optional[float]  # only for vertex ai models
+    input_cost_per_token_above_128k_tokens: Optional[float]  # only for vertex ai models
+    input_cost_per_character_above_128k_tokens: Optional[
+        float
+    ]  # only for vertex ai models
+    input_cost_per_image: Optional[float]  # only for vertex ai models
+    input_cost_per_audio_per_second: Optional[float]  # only for vertex ai models
+    input_cost_per_video_per_second: Optional[float]  # only for vertex ai models
     output_cost_per_token: Required[float]
-    output_cost_per_token_above_128k_tokens: Optional[float]
+    output_cost_per_character: Optional[float]  # only for vertex ai models
+    output_cost_per_token_above_128k_tokens: Optional[
+        float
+    ]  # only for vertex ai models
+    output_cost_per_character_above_128k_tokens: Optional[
+        float
+    ]  # only for vertex ai models
     output_cost_per_image: Optional[float]
-    output_cost_per_video_per_second: Optional[float]
-    output_cost_per_audio_per_second: Optional[float]
     output_vector_size: Optional[int]
+    output_cost_per_video_per_second: Optional[float]  # only for vertex ai models
+    output_cost_per_audio_per_second: Optional[float]  # only for vertex ai models
     litellm_provider: Required[str]
     mode: Required[
         Literal[
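Note: the tiered *_above_128k_tokens fields merged in above pair a base per-token rate with a higher long-context rate. A minimal sketch of how a consumer could combine them; the whole-prompt threshold rule is an assumption for illustration, not litellm's actual cost logic (litellm.completion_cost handles pricing internally):

from typing import Optional

def estimate_input_cost(
    prompt_tokens: int,
    cost_per_token: float,
    cost_per_token_above_128k: Optional[float],
) -> float:
    # Assumption: the above-128k rate applies to the entire prompt once the
    # context crosses 128k tokens, per the *_above_128k_tokens field naming.
    if cost_per_token_above_128k is not None and prompt_tokens > 128_000:
        return prompt_tokens * cost_per_token_above_128k
    return prompt_tokens * cost_per_token

# Hypothetical vertex ai-style rates:
print(estimate_input_cost(200_000, 1.25e-06, 2.5e-06))  # 0.5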
@@ -62,6 +72,7 @@ class ModelInfo(TypedDict, total=False):
     ]
     supported_openai_params: Required[Optional[List[str]]]
     supports_system_messages: Optional[bool]
+    supports_response_schema: Optional[bool]
 
 
 class GenericStreamingChunk(TypedDict):
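Note: both ModelInfo hunks surface new lookup fields. A short sketch of reading them through litellm.get_model_info, which resolves a model against model_prices_and_context_window.json; output_vector_size is the field this feature branch itself adds, so its presence in the returned info is an assumption until the branch lands:

import litellm

info = litellm.get_model_info(model="text-embedding-ada-002")
if info.get("mode") == "embedding":
    # Assumed to be populated for embedding models once this branch merges.
    print("output vector size:", info.get("output_vector_size"))
print("response_schema support:", bool(info.get("supports_response_schema")))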
@@ -159,11 +170,13 @@ class Function(OpenAIObject):
 
     def __init__(
         self,
-        arguments: Union[Dict, str],
+        arguments: Optional[Union[Dict, str]],
         name: Optional[str] = None,
         **params,
     ):
-        if isinstance(arguments, Dict):
+        if arguments is None:
+            arguments = ""
+        elif isinstance(arguments, Dict):
             arguments = json.dumps(arguments)
         else:
             arguments = arguments
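Note: this hunk makes arguments optional and normalizes None to an empty string before the existing dict/str handling runs (previously a None value fell through the else branch unchanged). A quick check of the new behavior, assuming the import path litellm.types.utils where this class is defined:

from litellm.types.utils import Function

f = Function(arguments=None, name="get_weather")
print(repr(f.arguments))  # '' -- None is coerced to an empty string

f = Function(arguments={"location": "Berlin"}, name="get_weather")
print(f.arguments)  # '{"location": "Berlin"}' -- dicts are serialized via json.dumps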
@@ -983,3 +996,8 @@ class GenericImageParsingChunk(TypedDict):
     type: str
     media_type: str
     data: str
+
+
+class ResponseFormatChunk(TypedDict, total=False):
+    type: Required[Literal["json_object", "text"]]
+    response_schema: dict
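Note: ResponseFormatChunk mirrors the OpenAI-style response_format parameter, with an optional response_schema for providers that accept a JSON schema alongside json_object mode (e.g. Vertex AI / Gemini). A self-contained usage sketch; the schema content is illustrative:

from typing import Literal
from typing_extensions import Required, TypedDict

class ResponseFormatChunk(TypedDict, total=False):
    type: Required[Literal["json_object", "text"]]
    response_schema: dict

response_format: ResponseFormatChunk = {
    "type": "json_object",
    "response_schema": {
        "type": "object",
        "properties": {"answer": {"type": "string"}},
        "required": ["answer"],
    },
}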