Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)

decouple model list from hardcoded package variables

commit 93c41e8f6d, parent 1ad9d7a98a
4 changed files with 43 additions and 86 deletions
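The substantive change is in the first hunk below: each provider list starts empty and is filled by a single pass over model_cost, keyed on each entry's litellm_provider tag. As a compact illustration of the same one-pass grouping, here is a minimal, runnable sketch; it is not the commit's literal code (the commit uses an explicit if/elif chain with one module-level list per provider), and the sample entries are illustrative stand-ins for the real cost map.

from collections import defaultdict
from typing import DefaultDict, Dict, List

def group_models_by_provider(cost_map: Dict[str, dict]) -> DefaultDict[str, List[str]]:
    # One pass over the cost map: bucket model names by 'litellm_provider'.
    buckets: DefaultDict[str, List[str]] = defaultdict(list)
    for model_name, meta in cost_map.items():
        provider = meta.get("litellm_provider")
        if provider is not None:
            buckets[provider].append(model_name)
    return buckets

# Illustrative stand-in entries: the real model_cost also carries token limits
# and per-token prices, but the grouping reads only 'litellm_provider'.
sample_cost_map = {
    "gpt-4": {"litellm_provider": "openai"},
    "claude-2": {"litellm_provider": "anthropic"},
    "command-nightly": {"litellm_provider": "cohere"},
}
print(group_models_by_provider(sample_cost_map)["openai"])  # ['gpt-4']

Entries whose provider tag is missing or unrecognized land in no list, which is also how the commit's elif chain behaves.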
|
@@ -78,39 +78,43 @@ config_path = None
 ####### Secret Manager #####################
 secret_manager_client = None
 
 ####### COMPLETION MODELS ###################
-open_ai_chat_completion_models: List = [
-    "gpt-4",
-    "gpt-4-0613",
-    "gpt-4-0314",
-    "gpt-4-32k",
-    "gpt-4-32k-0314",
-    "gpt-4-32k-0613",
-    #################
-    "gpt-3.5-turbo",
-    "gpt-3.5-turbo-0301",
-    "gpt-3.5-turbo-0613",
-    "gpt-3.5-turbo-16k",
-    "gpt-3.5-turbo-16k-0613",
-]
-open_ai_text_completion_models: List = [
-    "gpt-3.5-turbo-instruct",
-    "text-davinci-003",
-    "text-curie-001",
-    "text-babbage-001",
-    "text-ada-001",
-    "babbage-002",
-    "davinci-002",
-]
-
-cohere_models: List = [
-    "command-nightly",
-    "command",
-    "command-light",
-    "command-medium-beta",
-    "command-xlarge-beta",
-]
-
-anthropic_models: List = ["claude-2", "claude-instant-1", "claude-instant-1.2"]
+open_ai_chat_completion_models: List = []
+open_ai_text_completion_models: List = []
+cohere_models: List = []
+anthropic_models: List = []
+openrouter_models: List = []
+vertex_chat_models: List = []
+vertex_code_chat_models: List = []
+vertex_text_models: List = []
+vertex_code_text_models: List = []
+ai21_models: List = []
+nlp_cloud_models: List = []
+aleph_alpha_models: List = []
+
+for key, value in model_cost.items():
+    if value.get('litellm_provider') == 'openai':
+        open_ai_chat_completion_models.append(key)
+    elif value.get('litellm_provider') == 'text-completion-openai':
+        open_ai_text_completion_models.append(key)
+    elif value.get('litellm_provider') == 'cohere':
+        cohere_models.append(key)
+    elif value.get('litellm_provider') == 'anthropic':
+        anthropic_models.append(key)
+    elif value.get('litellm_provider') == 'openrouter':
+        openrouter_models.append(key)
+    elif value.get('litellm_provider') == 'vertex_ai-text-models':
+        vertex_text_models.append(key)
+    elif value.get('litellm_provider') == 'vertex_ai-code-text-models':
+        vertex_code_text_models.append(key)
+    elif value.get('litellm_provider') == 'vertex_ai-chat-models':
+        vertex_chat_models.append(key)
+    elif value.get('litellm_provider') == 'vertex_ai-code-chat-models':
+        vertex_code_chat_models.append(key)
+    elif value.get('litellm_provider') == 'ai21':
+        ai21_models.append(key)
+    elif value.get('litellm_provider') == 'nlp_cloud':
+        nlp_cloud_models.append(key)
+    elif value.get('litellm_provider') == 'aleph_alpha':
+        aleph_alpha_models.append(key)
 
 # well supported replicate llms
 replicate_models: List = [
@@ -128,44 +132,6 @@ replicate_models: List = [
     "replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad",
 ]
 
-openrouter_models: List = [
-    "google/palm-2-codechat-bison",
-    "google/palm-2-chat-bison",
-    "openai/gpt-3.5-turbo",
-    "openai/gpt-3.5-turbo-16k",
-    "openai/gpt-4-32k",
-    "anthropic/claude-2",
-    "anthropic/claude-instant-v1",
-    "meta-llama/llama-2-13b-chat",
-    "meta-llama/llama-2-70b-chat",
-]
-
-vertex_chat_models: List = [
-    "chat-bison-32k",
-    "chat-bison",
-    "chat-bison@001",
-]
-
-vertex_code_chat_models: List = [
-    "codechat-bison",
-    "codechat-bison-32k",
-    "codechat-bison@001",
-]
-
-vertex_text_models: List = [
-    "text-bison",
-    "text-bison@001",
-    # "text-bison-32k",
-]
-
-vertex_code_text_models: List = [
-    "code-bison",
-    # "code-bison-32K",
-    "code-bison@001",
-    "code-gecko@001",
-    "code-gecko@latest",
-]
-
 huggingface_models: List = [
     "meta-llama/Llama-2-7b-hf",
     "meta-llama/Llama-2-7b-chat-hf",
@@ -181,10 +147,6 @@ huggingface_models: List = [
     "meta-llama/Llama-2-70b-chat",
 ] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/providers
 
-ai21_models: List = ["j2-ultra", "j2-mid", "j2-light"]
-
-nlp_cloud_models: List = ["dolphin", "chatdolphin"]
-
 together_ai_models: List = [
     # llama llms - chat
     "togethercomputer/llama-2-70b-chat",
@@ -222,14 +184,6 @@ together_ai_models: List = [
 
 ] # supports all together ai models, just pass in the model id e.g. completion(model="together_computer/replit_code_3b",...)
 
-aleph_alpha_models: List = [
-    "luminous-base",
-    "luminous-base-control",
-    "luminous-extended",
-    "luminous-extended-control",
-    "luminous-supreme",
-    "luminous-supreme-control"
-]
 
 baseten_models: List = ["qvv0xeq", "q841o8w", "31dxrj3"] # FALCON 7B # WizardLM # Mosaic ML
 
Binary file not shown.
Binary file not shown.
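The deleted hunks above remove the hardcoded openrouter, vertex, ai21, nlp_cloud, and aleph_alpha lists; after this commit those names are recognized only if the cost map tags them. A hedged membership check as a sketch (the model name comes from the removed hardcoded cohere list; whether it still appears depends on the cost map's contents at runtime):

import litellm

# "command-nightly" sat in the old hardcoded cohere list; it is now present
# only if model_cost tags it with litellm_provider == 'cohere'.
if "command-nightly" in litellm.cohere_models:
    print("recognized via model_cost")
else:
    print("not tagged as a cohere model in this cost map")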
@@ -5,6 +5,9 @@ sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
 import time
-from litellm import get_max_tokens
+from litellm import get_max_tokens, model_cost, open_ai_chat_completion_models
 
 print(get_max_tokens("gpt-3.5-turbo"))
+
+print(model_cost)
+print(open_ai_chat_completion_models)
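A natural sanity check on top of the updated test (hypothetical, not part of this commit): every dynamically built entry must be backed by a cost-map record with the matching provider tag, which holds by construction of the population loop.

import litellm

for model in litellm.open_ai_chat_completion_models:
    # Holds by construction: the loop appends only model_cost keys whose
    # entry carries litellm_provider == 'openai'.
    assert model in litellm.model_cost
    assert litellm.model_cost[model].get("litellm_provider") == "openai"
print(f"{len(litellm.open_ai_chat_completion_models)} OpenAI chat models derived from model_cost")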