diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 1c096aac7..0180d232e 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -11,8 +11,10 @@ model_list:
       output_cost_per_token: 0.00003
       max_tokens: 4096
       base_model: gpt-3.5-turbo
-
-  - model_name: openai-gpt-3.5
+  - model_name: BEDROCK_GROUP
+    litellm_params:
+      model: bedrock/cohere.command-text-v14
+  - model_name: Azure OpenAI GPT-4 Canada-East (External)
     litellm_params:
       model: gpt-3.5-turbo
       api_key: os.environ/OPENAI_API_KEY
@@ -41,11 +43,12 @@ model_list:
       mode: completion
 
 litellm_settings:
+  # cache: True
   # setting callback class
   # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
-  model_group_alias_map: {"gpt-4": "openai-gpt-3.5"} # all requests with gpt-4 model_name, get sent to openai-gpt-3.5
 
-general_settings:
+
+environment_variables:
   # otel: True # OpenTelemetry Logger
   # master_key: sk-1234 # [OPTIONAL] Only use this if you to require all calls to contain this key (Authorization: Bearer sk-1234)
 