forked from phoenix/litellm-mirror
(feat) proxy - define model info
This commit is contained in:
parent
102de97960
commit
29fb97f88a
2 changed files with 8 additions and 2 deletions
|
@ -10,6 +10,8 @@ model_list:
|
|||
input_cost_per_token: 0.00006
|
||||
output_cost_per_token: 0.00003
|
||||
max_tokens: 4096
|
||||
base_model: gpt-35-turbo
|
||||
|
||||
- model_name: openai-gpt-3.5
|
||||
litellm_params:
|
||||
model: gpt-3.5-turbo
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
from pydantic import BaseModel, Extra
|
||||
from typing import Optional, List, Union, Dict
|
||||
from typing import Optional, List, Union, Dict, Literal
|
||||
from datetime import datetime
|
||||
import uuid
|
||||
######### Request Class Definition ######
|
||||
|
@ -40,7 +40,11 @@ class ProxyChatCompletionRequest(BaseModel):
|
|||
|
||||
class ModelInfo(BaseModel):
    """Optional metadata for a model entry in the proxy config.

    Every field is optional so config authors can supply only what they
    know; extra keys are preserved (see Config) rather than rejected.
    """

    # Identifier for this model entry.
    id: Optional[str]
    # Endpoint type the model serves; restricted to the known modes.
    mode: Optional[Literal['embedding', 'chat', 'completion']]
    # Per-token prices. NOTE(review): presumably used for cost/spend
    # tracking by the proxy — confirm against callers.
    input_cost_per_token: Optional[float]
    output_cost_per_token: Optional[float]
    # Maximum token count for this model.
    max_tokens: Optional[int]
    # Underlying OpenAI base model, restricted to the supported set.
    base_model: Optional[Literal['gpt-4-1106-preview', 'gpt-4-32k', 'gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']]

    class Config:
        extra = Extra.allow  # Allow extra fields beyond those declared above
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue