forked from phoenix/litellm-mirror
docs(proxy_server): cleanup docs
This commit is contained in:
parent d8aa7d1aaf
commit 1df487a45d
1 changed file with 1 addition and 40 deletions
@@ -520,46 +520,7 @@ $ litellm --config -f ./litellm_config.toml
LiteLLM will save a copy of this file in its package, so it can persist these settings across restarts.
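For example (a minimal sketch; the config path and model name are just the ones used elsewhere in this doc):

```shell
$ litellm --config -f ./litellm_config.toml   # settings from this file are saved inside the package
$ litellm --model ollama/llama2               # later runs reuse the saved settings
```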
**Complete Config File**
```toml
### API KEYS ###
[keys]
# HUGGINGFACE_API_KEY = "" # Uncomment to save your Hugging Face API key
# OPENAI_API_KEY = "" # Uncomment to save your OpenAI API key
# TOGETHERAI_API_KEY = "" # Uncomment to save your TogetherAI API key
# NLP_CLOUD_API_KEY = "" # Uncomment to save your NLP Cloud API key
# ANTHROPIC_API_KEY = "" # Uncomment to save your Anthropic API key
# REPLICATE_API_KEY = "" # Uncomment to save your Replicate API key
# AWS_ACCESS_KEY_ID = "" # Uncomment to save your Bedrock/Sagemaker access keys
# AWS_SECRET_ACCESS_KEY = "" # Uncomment to save your Bedrock/Sagemaker access keys

### LITELLM PARAMS ###
[general]
# add_function_to_prompt = true # e.g. Ollama doesn't support functions, so add the function to the prompt instead
# drop_params = true # drop any params not supported by the provider (e.g. Ollama)
# default_model = "gpt-4" # route all requests to this model
# fallbacks = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"] # models to fall back to if the completion call fails (remember: add the relevant keys)

### MODEL PARAMS ###
[model."ollama/llama2"] # run via `litellm --model ollama/llama2`
# max_tokens = "" # set max tokens for the model
# temperature = "" # set temperature for the model
# api_base = "" # set a custom api base for the model

[model."ollama/llama2".prompt_template] # [OPTIONAL] LiteLLM can automatically format the prompt - docs: https://docs.litellm.ai/docs/completion/prompt_formatting
# MODEL_SYSTEM_MESSAGE_START_TOKEN = "[INST] <<SYS>>\n" # This does not need to be a token; it can be any string
# MODEL_SYSTEM_MESSAGE_END_TOKEN = "\n<</SYS>>\n [/INST]\n" # This does not need to be a token; it can be any string

# MODEL_USER_MESSAGE_START_TOKEN = "[INST] " # This does not need to be a token; it can be any string
# MODEL_USER_MESSAGE_END_TOKEN = " [/INST]\n" # Applies only to user messages. Can be any string.

# MODEL_ASSISTANT_MESSAGE_START_TOKEN = "" # Applies only to assistant messages. Can be any string.
# MODEL_ASSISTANT_MESSAGE_END_TOKEN = "\n" # Applies only to assistant messages. Can be any string.

# MODEL_PRE_PROMPT = "You are a good bot" # Applied at the start of the prompt
# MODEL_POST_PROMPT = "Now answer as best as you can" # Applied at the end of the prompt
```
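To sanity-check a config like the one above, start the proxy for one of the configured models and send it an OpenAI-format request. A minimal sketch, assuming the proxy is listening on the local address it prints at startup (shown here as http://0.0.0.0:8000; adjust to whatever your startup banner shows):

```shell
$ litellm --model ollama/llama2   # picks up the [model."ollama/llama2"] settings above

$ curl http://0.0.0.0:8000/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "ollama/llama2",
    "messages": [{"role": "user", "content": "what do you know?"}]
  }'
```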
[**Complete Config File**](https://github.com/BerriAI/litellm/blob/main/secrets_template.toml)
[**🔥 [Tutorial] modify a model prompt on the proxy**](./tutorials/model_config_proxy.md)