Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
add default_team_params as a config.yaml setting
This commit is contained in:
parent 7d383fc0c1
commit 02f8690735
1 changed file with 56 additions and 44 deletions
@@ -65,6 +65,7 @@ from litellm.proxy._types import (
     KeyManagementSystem,
     KeyManagementSettings,
     LiteLLM_UpperboundKeyGenerateParams,
+    NewTeamRequest,
 )
 from litellm.types.utils import StandardKeyGenerationConfig, LlmProviders
 from litellm.integrations.custom_logger import CustomLogger
@@ -126,19 +127,19 @@ prometheus_initialize_budget_metrics: Optional[bool] = False
 require_auth_for_metrics_endpoint: Optional[bool] = False
 argilla_batch_size: Optional[int] = None
 datadog_use_v1: Optional[bool] = False  # if you want to use v1 datadog logged payload
-gcs_pub_sub_use_v1: Optional[
-    bool
-] = False  # if you want to use v1 gcs pubsub logged payload
+gcs_pub_sub_use_v1: Optional[bool] = (
+    False  # if you want to use v1 gcs pubsub logged payload
+)
 argilla_transformation_object: Optional[Dict[str, Any]] = None
-_async_input_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
-_async_success_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
-_async_failure_callback: List[
-    Union[str, Callable, CustomLogger]
-] = []  # internal variable - async custom callbacks are routed here.
+_async_input_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
+_async_success_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
+_async_failure_callback: List[Union[str, Callable, CustomLogger]] = (
+    []
+)  # internal variable - async custom callbacks are routed here.
 pre_call_rules: List[Callable] = []
 post_call_rules: List[Callable] = []
 turn_off_message_logging: Optional[bool] = False
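The three _async_* lists above are internal routing targets; the supported way to feed them is to register a CustomLogger instance on litellm.callbacks. A minimal sketch of that pattern, under the assumption that a mocked acompletion call still triggers success logging:

    import asyncio
    import litellm
    from litellm.integrations.custom_logger import CustomLogger

    class MyHandler(CustomLogger):
        # runs on successful async calls; registered handlers are routed to the
        # internal _async_success_callback list shown in the hunk above
        async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
            print("async success for model:", kwargs.get("model"))

    litellm.callbacks = [MyHandler()]  # registration point for custom callbacks

    async def main():
        await litellm.acompletion(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "hi"}],
            mock_response="hello",  # mock so the sketch runs without provider keys
        )

    asyncio.run(main())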
@@ -146,18 +147,18 @@ log_raw_request_response: bool = False
 redact_messages_in_exceptions: Optional[bool] = False
 redact_user_api_key_info: Optional[bool] = False
 filter_invalid_headers: Optional[bool] = False
-add_user_information_to_llm_headers: Optional[
-    bool
-] = None  # adds user_id, team_id, token hash (params from StandardLoggingMetadata) to request headers
+add_user_information_to_llm_headers: Optional[bool] = (
+    None  # adds user_id, team_id, token hash (params from StandardLoggingMetadata) to request headers
+)
 store_audit_logs = False  # Enterprise feature, allow users to see audit logs
 ### end of callbacks #############

-email: Optional[
-    str
-] = None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-token: Optional[
-    str
-] = None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+email: Optional[str] = (
+    None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+token: Optional[str] = (
+    None  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
 telemetry = True
 max_tokens: int = DEFAULT_MAX_TOKENS  # OpenAI Defaults
 drop_params = bool(os.getenv("LITELLM_DROP_PARAMS", False))
@@ -233,20 +234,24 @@ enable_loadbalancing_on_batch_endpoints: Optional[bool] = None
 enable_caching_on_provider_specific_optional_params: bool = (
     False  # feature-flag for caching on optional params - e.g. 'top_k'
 )
-caching: bool = False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-caching_with_models: bool = False  # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
-cache: Optional[
-    Cache
-] = None  # cache object <- use this - https://docs.litellm.ai/docs/caching
+caching: bool = (
+    False  # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+caching_with_models: bool = (
+    False  # # Not used anymore, will be removed in next MAJOR release - https://github.com/BerriAI/litellm/discussions/648
+)
+cache: Optional[Cache] = (
+    None  # cache object <- use this - https://docs.litellm.ai/docs/caching
+)
 default_in_memory_ttl: Optional[float] = None
 default_redis_ttl: Optional[float] = None
 default_redis_batch_cache_expiry: Optional[float] = None
 model_alias_map: Dict[str, str] = {}
 model_group_alias_map: Dict[str, str] = {}
 max_budget: float = 0.0  # set the max budget across all providers
-budget_duration: Optional[
-    str
-] = None  # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
+budget_duration: Optional[str] = (
+    None  # proxy only - resets budget after fixed duration. You can set duration as seconds ("30s"), minutes ("30m"), hours ("30h"), days ("30d").
+)
 default_soft_budget: float = (
     DEFAULT_SOFT_BUDGET  # by default all litellm proxy keys have a soft budget of 50.0
 )
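The cache global above is the object the inline comment points at (https://docs.litellm.ai/docs/caching). A minimal sketch of the documented usage, assuming the default in-memory backend:

    import litellm
    from litellm.caching import Cache

    litellm.cache = Cache()  # in-memory by default; other backends (e.g. Redis) take constructor args

    # repeated identical prompts can then be served from the cache
    response = litellm.completion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "what does litellm.cache do?"}],
        caching=True,
        mock_response="serves repeated prompts from the cache",  # mock so the sketch runs without provider keys
    )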
@@ -255,11 +260,15 @@ forward_traceparent_to_llm_provider: bool = False

 _current_cost = 0.0  # private variable, used if max budget is set
 error_logs: Dict = {}
-add_function_to_prompt: bool = False  # if function calling not supported by api, append function call details to system prompt
+add_function_to_prompt: bool = (
+    False  # if function calling not supported by api, append function call details to system prompt
+)
 client_session: Optional[httpx.Client] = None
 aclient_session: Optional[httpx.AsyncClient] = None
 model_fallbacks: Optional[List] = None  # Deprecated for 'litellm.fallbacks'
-model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+model_cost_map_url: str = (
+    "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+)
 suppress_debug_info = False
 dynamodb_table_name: Optional[str] = None
 s3_callback_params: Optional[Dict] = None
@@ -268,6 +277,7 @@ default_key_generate_params: Optional[Dict] = None
 upperbound_key_generate_params: Optional[LiteLLM_UpperboundKeyGenerateParams] = None
 key_generation_settings: Optional[StandardKeyGenerationConfig] = None
 default_internal_user_params: Optional[Dict] = None
+default_team_params: Optional[NewTeamRequest] = None
 default_team_settings: Optional[List] = None
 max_user_budget: Optional[float] = None
 default_max_internal_user_budget: Optional[float] = None
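The functional change in this commit is the default_team_params global just above, typed with the NewTeamRequest import added in the first hunk. Per the commit title it is meant to be set from config.yaml; a minimal sketch of populating it programmatically, where the field names are assumptions about what NewTeamRequest accepts:

    import litellm
    from litellm.proxy._types import NewTeamRequest

    # defaults assumed to apply when a team is created without explicit values
    litellm.default_team_params = NewTeamRequest(
        max_budget=100.0,         # hypothetical values; NewTeamRequest defines the accepted keys
        budget_duration="30d",
        models=["gpt-4o-mini"],
    )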
@@ -281,7 +291,9 @@ disable_end_user_cost_tracking_prometheus_only: Optional[bool] = None
 custom_prometheus_metadata_labels: List[str] = []
 #### REQUEST PRIORITIZATION ####
 priority_reservation: Optional[Dict[str, float]] = None
-force_ipv4: bool = False  # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6.
+force_ipv4: bool = (
+    False  # when True, litellm will force ipv4 for all LLM requests. Some users have seen httpx ConnectionError when using ipv6.
+)
 module_level_aclient = AsyncHTTPHandler(
     timeout=request_timeout, client_alias="module level aclient"
 )
@@ -295,13 +307,13 @@ fallbacks: Optional[List] = None
 context_window_fallbacks: Optional[List] = None
 content_policy_fallbacks: Optional[List] = None
 allowed_fails: int = 3
-num_retries_per_request: Optional[
-    int
-] = None  # for the request overall (incl. fallbacks + model retries)
+num_retries_per_request: Optional[int] = (
+    None  # for the request overall (incl. fallbacks + model retries)
+)
 ####### SECRET MANAGERS #####################
-secret_manager_client: Optional[
-    Any
-] = None  # list of instantiated key management clients - e.g. azure kv, infisical, etc.
+secret_manager_client: Optional[Any] = (
+    None  # list of instantiated key management clients - e.g. azure kv, infisical, etc.
+)
 _google_kms_resource_name: Optional[str] = None
 _key_management_system: Optional[KeyManagementSystem] = None
 _key_management_settings: KeyManagementSettings = KeyManagementSettings()
@@ -1050,10 +1062,10 @@ from .types.llms.custom_llm import CustomLLMItem
 from .types.utils import GenericStreamingChunk

 custom_provider_map: List[CustomLLMItem] = []
-_custom_providers: List[
-    str
-] = []  # internal helper util, used to track names of custom providers
-disable_hf_tokenizer_download: Optional[
-    bool
-] = None  # disable huggingface tokenizer download. Defaults to openai clk100
+_custom_providers: List[str] = (
+    []
+)  # internal helper util, used to track names of custom providers
+disable_hf_tokenizer_download: Optional[bool] = (
+    None  # disable huggingface tokenizer download. Defaults to openai clk100
+)
 global_disable_no_log_param: bool = False
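custom_provider_map above is the registration point for custom providers, while _custom_providers only tracks their names internally. A short sketch in the style of the custom-LLM docs; the provider name and mock response are placeholders:

    import litellm
    from litellm import CustomLLM

    class MyCustomLLM(CustomLLM):
        def completion(self, *args, **kwargs):
            # delegate anywhere; mock_response keeps the sketch self-contained
            return litellm.completion(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": "hello"}],
                mock_response="hi from my custom provider",
            )

    litellm.custom_provider_map = [
        {"provider": "my-custom-llm", "custom_handler": MyCustomLLM()}
    ]

    resp = litellm.completion(
        model="my-custom-llm/my-model",
        messages=[{"role": "user", "content": "hey"}],
    )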