diff --git a/.gitignore b/.gitignore
index 3028c4267..3bc0cdc13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,6 @@ __pycache__/
 bun.lockb
 **/.DS_Store
 .aider*
-litellm_results.jsonl
\ No newline at end of file
+litellm_results.jsonl
+secrets.toml
+.gitignore
diff --git a/.gitmodules b/.gitmodules
index 1502328e8..9a86ab7f8 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -3,4 +3,4 @@
 	url = https://github.com/BerriAI/litellm
 [submodule "proxy-server"]
 	path = proxy-server
-	url = https://github.com/BerriAI/liteLLM-proxy.git
+	url = https://github.com/BerriAI/litellm/tree/main/litellm/proxy
diff --git a/litellm/proxy/.gitignore b/litellm/proxy/.gitignore
index 2eea525d8..caa4783d9 100644
--- a/litellm/proxy/.gitignore
+++ b/litellm/proxy/.gitignore
@@ -1 +1,2 @@
-.env
\ No newline at end of file
+.env
+secrets.toml
\ No newline at end of file
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 6bede366a..1aaa1472f 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -118,6 +118,10 @@ def load_config():
                 litellm.anthropic_key = user_config["keys"][key]
             elif key == "REPLICATE_API_KEY":
                 litellm.replicate_key = user_config["keys"][key]
+            elif key == "AWS_ACCESS_KEY_ID":
+                os.environ["AWS_ACCESS_KEY_ID"] = user_config["keys"][key]
+            elif key == "AWS_SECRET_ACCESS_KEY":
+                os.environ["AWS_SECRET_ACCESS_KEY"] = user_config["keys"][key]
 
     ## settings
     litellm.add_function_to_prompt = user_config["general"].get("add_function_to_prompt", True) # by default add function to prompt if unsupported by provider
diff --git a/litellm/proxy/secrets_template.toml b/litellm/proxy/secrets_template.toml
new file mode 100644
index 000000000..3297ce724
--- /dev/null
+++ b/litellm/proxy/secrets_template.toml
@@ -0,0 +1,31 @@
+[keys]
+# HUGGINGFACE_API_KEY="" # Uncomment to save your Hugging Face API key
+# OPENAI_API_KEY="" # Uncomment to save your OpenAI API Key
+# TOGETHERAI_API_KEY="" # Uncomment to save your TogetherAI API key
+# NLP_CLOUD_API_KEY="" # Uncomment to save your NLP Cloud API key
+# ANTHROPIC_API_KEY="" # Uncomment to save your Anthropic API key
+# REPLICATE_API_KEY="" # Uncomment to save your Replicate API key
+# AWS_ACCESS_KEY_ID = "" # Uncomment to save your Bedrock/Sagemaker access keys
+# AWS_SECRET_ACCESS_KEY = "" # Uncomment to save your Bedrock/Sagemaker access keys
+
+[general]
+# add_function_to_prompt = True # e.g. Ollama doesn't support functions, so add it to the prompt instead
+# drop_params = True # drop any params not supported by the provider (e.g. Ollama)
+
+[model."ollama/llama2"] # run via `litellm --model ollama/llama2`
+# max_tokens = "" # set max tokens for the model
+# temperature = "" # set temperature for the model
+# api_base = "" # set a custom api base for the model
+
+[model."ollama/llama2".prompt_template] # [OPTIONAL] LiteLLM can automatically format the prompt - docs: https://docs.litellm.ai/docs/completion/prompt_formatting
+# MODEL_SYSTEM_MESSAGE_START_TOKEN = "[INST] <<SYS>>\n" # This does not need to be a token, can be any string
+# MODEL_SYSTEM_MESSAGE_END_TOKEN = "\n<</SYS>>\n [/INST]\n" # This does not need to be a token, can be any string
+
+# MODEL_USER_MESSAGE_START_TOKEN = "[INST] " # This does not need to be a token, can be any string
+# MODEL_USER_MESSAGE_END_TOKEN = " [/INST]\n" # Applies only to user messages. Can be any string.
+
+# MODEL_ASSISTANT_MESSAGE_START_TOKEN = "" # Applies only to assistant messages. Can be any string.
+# MODEL_ASSISTANT_MESSAGE_END_TOKEN = "\n" # Applies only to assistant messages. Can be any string.
+
+# MODEL_PRE_PROMPT = "You are a good bot" # Applied at the start of the prompt
+# MODEL_POST_PROMPT = "Now answer as best as you can" # Applied at the end of the prompt
\ No newline at end of file
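
For context, the proxy_server.py hunk above extends the key-handling loop in load_config(): most provider keys from secrets.toml are assigned to litellm module attributes, while the new AWS credentials are exported as environment variables instead, since the Bedrock/Sagemaker integrations authenticate through the AWS SDK, which reads AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from the environment. A minimal standalone sketch of that pattern follows; the toml import, the load_keys name, and the default path are illustrative assumptions, not part of this diff:

import os

import toml  # assumed TOML reader for this sketch; the proxy parses secrets.toml itself

import litellm


def load_keys(path: str = "secrets.toml") -> None:
    # Hypothetical helper mirroring the load_config() pattern in the diff above.
    user_config = toml.load(path)
    for key, value in user_config.get("keys", {}).items():
        if key == "ANTHROPIC_API_KEY":
            litellm.anthropic_key = value  # consumed directly by litellm
        elif key == "REPLICATE_API_KEY":
            litellm.replicate_key = value
        elif key in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"):
            # Bedrock/Sagemaker auth goes through the AWS SDK, which reads these
            # standard variables from the environment rather than from litellm.
            os.environ[key] = value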