From 1df487a45d4ab533fef113c9f8d5aca997cc025c Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 16 Oct 2023 15:38:50 -0700
Subject: [PATCH] docs(proxy_server): cleanup docs

---
 docs/my-website/docs/proxy_server.md | 41 +---------------------------
 1 file changed, 1 insertion(+), 40 deletions(-)

diff --git a/docs/my-website/docs/proxy_server.md b/docs/my-website/docs/proxy_server.md
index a5cb8fe4a..d2a9d521f 100644
--- a/docs/my-website/docs/proxy_server.md
+++ b/docs/my-website/docs/proxy_server.md
@@ -520,46 +520,7 @@ $ litellm --config -f ./litellm_config.toml
 
 LiteLLM will save a copy of this file in it's package, so it can persist these settings across restarts.
 
-**Complete Config File**
-
-```shell
-### API KEYS ###
-[keys]
-# HUGGINGFACE_API_KEY="" # Uncomment to save your Hugging Face API key
-# OPENAI_API_KEY="" # Uncomment to save your OpenAI API Key
-# TOGETHERAI_API_KEY="" # Uncomment to save your TogetherAI API key
-# NLP_CLOUD_API_KEY="" # Uncomment to save your NLP Cloud API key
-# ANTHROPIC_API_KEY="" # Uncomment to save your Anthropic API key
-# REPLICATE_API_KEY="" # Uncomment to save your Replicate API key
-# AWS_ACCESS_KEY_ID = "" # Uncomment to save your Bedrock/Sagemaker access keys
-# AWS_SECRET_ACCESS_KEY = "" # Uncomment to save your Bedrock/Sagemaker access keys
-
-### LITELLM PARAMS ###
-[general]
-# add_function_to_prompt = True # e.g: Ollama doesn't support functions, so add it to the prompt instead
-# drop_params = True # drop any params not supported by the provider (e.g. Ollama)
-# default_model = "gpt-4" # route all requests to this model
-# fallbacks = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"] # models you want to fallback to in case completion call fails (remember: add relevant keys)
-
-### MODEL PARAMS ###
-[model."ollama/llama2"] # run via `litellm --model ollama/llama2`
-# max_tokens = "" # set max tokens for the model
-# temperature = "" # set temperature for the model
-# api_base = "" # set a custom api base for the model
-
-[model."ollama/llama2".prompt_template] # [OPTIONAL] LiteLLM can automatically formats the prompt - docs: https://docs.litellm.ai/docs/completion/prompt_formatting
-# MODEL_SYSTEM_MESSAGE_START_TOKEN = "[INST] <>\n" # This does not need to be a token, can be any string
-# MODEL_SYSTEM_MESSAGE_END_TOKEN = "\n<>\n [/INST]\n" # This does not need to be a token, can be any string
-
-# MODEL_USER_MESSAGE_START_TOKEN = "[INST] " # This does not need to be a token, can be any string
-# MODEL_USER_MESSAGE_END_TOKEN = " [/INST]\n" # Applies only to user messages. Can be any string.
-
-# MODEL_ASSISTANT_MESSAGE_START_TOKEN = "" # Applies only to assistant messages. Can be any string.
-# MODEL_ASSISTANT_MESSAGE_END_TOKEN = "\n" # Applies only to system messages. Can be any string.
-
-# MODEL_PRE_PROMPT = "You are a good bot" # Applied at the start of the prompt
-# MODEL_POST_PROMPT = "Now answer as best as you can" # Applied at the end of the prompt
-```
+[**Complete Config File**](https://github.com/BerriAI/litellm/blob/main/secrets_template.toml)
 
 [**🔥 [Tutorial] modify a model prompt on the proxy**](./tutorials/model_config_proxy.md)
 