forked from phoenix/litellm-mirror
fix(gitmodules): remapping to new proxy
parent 4f172101df
commit 606543eac8
5 changed files with 41 additions and 3 deletions
.gitignore (vendored), 4 changes

@@ -5,4 +5,6 @@ __pycache__/
 bun.lockb
 **/.DS_Store
 .aider*
 litellm_results.jsonl
+secrets.toml
+.gitignore
.gitmodules (vendored), 2 changes

@@ -3,4 +3,4 @@
 	url = https://github.com/BerriAI/litellm
 [submodule "proxy-server"]
 	path = proxy-server
-	url = https://github.com/BerriAI/liteLLM-proxy.git
+	url = https://github.com/BerriAI/litellm/tree/main/litellm/proxy
litellm/proxy/.gitignore (vendored), 3 changes

@@ -1 +1,2 @@
 .env
+secrets.toml
@@ -118,6 +118,10 @@ def load_config():
                 litellm.anthropic_key = user_config["keys"][key]
             elif key == "REPLICATE_API_KEY":
                 litellm.replicate_key = user_config["keys"][key]
+            elif key == "AWS_ACCESS_KEY_ID":
+                os.environ["AWS_ACCESS_KEY_ID"] = user_config["keys"][key]
+            elif key == "AWS_SECRET_ACCESS_KEY":
+                os.environ["AWS_SECRET_ACCESS_KEY"] = user_config["keys"][key]

     ## settings
     litellm.add_function_to_prompt = user_config["general"].get("add_function_to_prompt", True) # by default add function to prompt if unsupported by provider
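The hunk above shows only fragments of the key-handling branch inside load_config(). As a rough sketch, assuming the branch sits in a loop over user_config["keys"] (the loop itself is not part of the diff), the pattern this commit extends looks roughly like the following; the helper name apply_keys is purely illustrative:

# Illustrative sketch, not taken verbatim from the repository: the loop
# structure and the function name are assumptions.
import os

import litellm


def apply_keys(user_config: dict) -> None:
    for key in user_config.get("keys", {}):
        value = user_config["keys"][key]
        if key == "ANTHROPIC_API_KEY":
            litellm.anthropic_key = value            # provider keys map onto litellm attributes
        elif key == "REPLICATE_API_KEY":
            litellm.replicate_key = value
        elif key == "AWS_ACCESS_KEY_ID":
            os.environ["AWS_ACCESS_KEY_ID"] = value  # Bedrock/SageMaker credentials go to env vars
        elif key == "AWS_SECRET_ACCESS_KEY":
            os.environ["AWS_SECRET_ACCESS_KEY"] = value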
litellm/proxy/secrets_template.toml (new file), 31 changes

@@ -0,0 +1,31 @@
+[keys]
+# HUGGINGFACE_API_KEY="" # Uncomment to save your Hugging Face API key
+# OPENAI_API_KEY="" # Uncomment to save your OpenAI API Key
+# TOGETHERAI_API_KEY="" # Uncomment to save your TogetherAI API key
+# NLP_CLOUD_API_KEY="" # Uncomment to save your NLP Cloud API key
+# ANTHROPIC_API_KEY="" # Uncomment to save your Anthropic API key
+# REPLICATE_API_KEY="" # Uncomment to save your Replicate API key
+# AWS_ACCESS_KEY_ID = "" # Uncomment to save your Bedrock/Sagemaker access keys
+# AWS_SECRET_ACCESS_KEY = "" # Uncomment to save your Bedrock/Sagemaker access keys
+
+[general]
+# add_function_to_prompt = True # e.g. Ollama doesn't support functions, so add them to the prompt instead
+# drop_params = True # drop any params not supported by the provider (e.g. Ollama)
+
+[model."ollama/llama2"] # run via `litellm --model ollama/llama2`
+# max_tokens = "" # set max tokens for the model
+# temperature = "" # set temperature for the model
+# api_base = "" # set a custom api base for the model
+
+[model."ollama/llama2".prompt_template] # [OPTIONAL] LiteLLM can automatically format the prompt - docs: https://docs.litellm.ai/docs/completion/prompt_formatting
+# MODEL_SYSTEM_MESSAGE_START_TOKEN = "[INST] <<SYS>>\n" # This does not need to be a token; it can be any string
+# MODEL_SYSTEM_MESSAGE_END_TOKEN = "\n<</SYS>>\n [/INST]\n" # This does not need to be a token; it can be any string
+
+# MODEL_USER_MESSAGE_START_TOKEN = "[INST] " # This does not need to be a token; it can be any string
+# MODEL_USER_MESSAGE_END_TOKEN = " [/INST]\n" # Applies only to user messages. Can be any string.
+
+# MODEL_ASSISTANT_MESSAGE_START_TOKEN = "" # Applies only to assistant messages. Can be any string.
+# MODEL_ASSISTANT_MESSAGE_END_TOKEN = "\n" # Applies only to assistant messages. Can be any string.
+
+# MODEL_PRE_PROMPT = "You are a good bot" # Applied at the start of the prompt
+# MODEL_POST_PROMPT = "Now answer as best as you can" # Applied at the end of the prompt
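For readers who have not seen LiteLLM's prompt templating, here is a minimal sketch of how start/end tokens like the ones above are typically stitched around chat messages; the function apply_prompt_template below is illustrative only, not the proxy's actual implementation (the documented behavior is at https://docs.litellm.ai/docs/completion/prompt_formatting):

# Illustrative only: shows how prompt_template start/end tokens could wrap chat
# messages into a single prompt string. Name and composition order are assumed.
def apply_prompt_template(messages: list[dict], tpl: dict) -> str:
    role_tokens = {
        "system": ("MODEL_SYSTEM_MESSAGE_START_TOKEN", "MODEL_SYSTEM_MESSAGE_END_TOKEN"),
        "user": ("MODEL_USER_MESSAGE_START_TOKEN", "MODEL_USER_MESSAGE_END_TOKEN"),
        "assistant": ("MODEL_ASSISTANT_MESSAGE_START_TOKEN", "MODEL_ASSISTANT_MESSAGE_END_TOKEN"),
    }
    prompt = tpl.get("MODEL_PRE_PROMPT", "")
    for msg in messages:
        start_key, end_key = role_tokens[msg["role"]]
        prompt += tpl.get(start_key, "") + msg["content"] + tpl.get(end_key, "")
    return prompt + tpl.get("MODEL_POST_PROMPT", "")

With the llama2-style tokens in the template above, a system message followed by a user message would render as "[INST] <<SYS>>\n{system}\n<</SYS>>\n [/INST]\n[INST] {user} [/INST]\n".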