forked from phoenix/litellm-mirror
fix(utils.py): read env variables for known openai-compatible api's (e.g. perplexity), dynamically from the environment
parent d77eee34f0
commit 9513d6b862

4 changed files with 9 additions and 7 deletions
2 binary files changed (not shown).
litellm/main.py

@@ -257,7 +257,7 @@ def completion(
     if deployment_id != None: # azure llms
         model=deployment_id
         custom_llm_provider="azure"
-    model, custom_llm_provider = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
+    model, custom_llm_provider, dynamic_api_key = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
     model_api_key = get_api_key(llm_provider=custom_llm_provider, dynamic_api_key=api_key) # get the api key from the environment if required for the model
     if model_api_key and "sk-litellm" in model_api_key:
         api_base = "https://proxy.litellm.ai"
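For context, a minimal sketch of how the new three-value return is consumed; the model name and api_base below are illustrative, not taken from this diff:

```python
# Hypothetical usage sketch: callers of get_llm_provider must now
# unpack three values instead of two.
from litellm.utils import get_llm_provider

model, custom_llm_provider, dynamic_api_key = get_llm_provider(
    model="mistral-7b-instruct",           # example model name
    custom_llm_provider=None,
    api_base="https://api.perplexity.ai",  # known OpenAI-compatible endpoint
)
# custom_llm_provider -> "custom_openai" (api_base matched a known endpoint)
# dynamic_api_key     -> os.environ.get("PERPLEXITYAI_API_KEY"), possibly None
```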
@@ -391,6 +391,7 @@ def completion(
         # set API KEY
         api_key = (
             api_key or
+            dynamic_api_key or # allows us to read env variables for compatible openai api's like perplexity
             litellm.api_key or
             litellm.openai_key or
             get_secret("OPENAI_API_KEY")
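The new `dynamic_api_key` slots in second in the precedence chain: an explicitly passed key still wins, but the env-derived key now beats the module-level globals. A self-contained mirror of that ordering (the helper below is illustrative, not a function defined in this diff):

```python
import os

def resolve_openai_key(api_key=None, dynamic_api_key=None,
                       litellm_api_key=None, litellm_openai_key=None):
    # Same short-circuit order as the api_key assignment above.
    return (
        api_key
        or dynamic_api_key   # e.g. PERPLEXITYAI_API_KEY read by get_llm_provider
        or litellm_api_key
        or litellm_openai_key
        or os.environ.get("OPENAI_API_KEY")
    )

assert resolve_openai_key(api_key="sk-explicit", dynamic_api_key="pplx-env") == "sk-explicit"
assert resolve_openai_key(dynamic_api_key="pplx-env") == "pplx-env"
```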
@@ -1371,7 +1372,7 @@ def embedding(
     caching=False,
     custom_llm_provider=None,
 ):
-    model, custom_llm_provider = get_llm_provider(model, custom_llm_provider)
+    model, custom_llm_provider, dynamic_api_key = get_llm_provider(model, custom_llm_provider)
     try:
         response = None
         logging = litellm_logging_obj
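The same three-value unpack is mirrored in embedding(), and this is the easy part of the change to miss: a caller left on the old two-value unpack fails only at runtime. A self-contained illustration with a stand-in function (not litellm's):

```python
def get_llm_provider_stub(model, custom_llm_provider=None):
    # Stand-in for the updated function: it now returns three values.
    return model, custom_llm_provider or "openai", None

try:
    model, provider = get_llm_provider_stub("gpt-3.5-turbo")  # stale 2-value unpack
except ValueError as err:
    print(err)  # too many values to unpack (expected 2)

model, provider, dynamic_api_key = get_llm_provider_stub("gpt-3.5-turbo")  # updated unpack
```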
litellm/utils.py

@@ -1405,15 +1405,16 @@ def get_optional_params( # use the openai defaults

 def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None):
     try:
+        dynamic_api_key = None
         # check if llm provider provided
         if custom_llm_provider:
-            return model, custom_llm_provider
+            return model, custom_llm_provider, dynamic_api_key

         # check if llm provider part of model name
         if model.split("/",1)[0] in litellm.provider_list:
             custom_llm_provider = model.split("/", 1)[0]
             model = model.split("/", 1)[1]
-            return model, custom_llm_provider
+            return model, custom_llm_provider, dynamic_api_key

         # check if api base is a known openai compatible endpoint
         if api_base:
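The `model.split("/", 1)` check above peels a provider prefix off the model string before the api_base fallback runs. A standalone sketch, with a trimmed provider list standing in for litellm.provider_list:

```python
# Illustrative only: an assumed subset of litellm.provider_list.
provider_list = ["openai", "azure", "huggingface", "perplexity"]

def split_provider_prefix(model: str):
    prefix = model.split("/", 1)[0]
    if prefix in provider_list:
        # maxsplit=1 keeps slashes inside the model id intact
        return model.split("/", 1)[1], prefix
    return model, None

print(split_provider_prefix("huggingface/bigcode/starcoder"))  # ('bigcode/starcoder', 'huggingface')
print(split_provider_prefix("gpt-3.5-turbo"))                  # ('gpt-3.5-turbo', None)
```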
@@ -1421,8 +1422,8 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None):
             if endpoint in api_base:
                 custom_llm_provider = "custom_openai"
                 if endpoint == "api.perplexity.ai":
-                    litellm.api_key = os.getenv("PERPLEXITYAI_API_KEY")
+                    dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
-                return model, custom_llm_provider
+                return model, custom_llm_provider, dynamic_api_key

         # check if model in known model provider list -> for huggingface models, raise exception as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, et.)
         ## openai - chatcompletion + text completion
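This hunk is the heart of the fix: the old code assigned `litellm.api_key` globally, so one perplexity call would silently override the key used by every later OpenAI call; routing the value through the return tuple scopes it to the one request. A minimal sketch of the endpoint check (the endpoint list here is assumed for illustration; the diff only shows the perplexity entry):

```python
import os

KNOWN_OPENAI_COMPATIBLE_ENDPOINTS = ["api.perplexity.ai"]  # assumed subset

def detect_from_api_base(api_base):
    custom_llm_provider = None
    dynamic_api_key = None
    for endpoint in KNOWN_OPENAI_COMPATIBLE_ENDPOINTS:
        if endpoint in api_base:
            custom_llm_provider = "custom_openai"
            if endpoint == "api.perplexity.ai":
                # read per-call from the environment instead of assigning
                # litellm.api_key, so the global key is never clobbered
                dynamic_api_key = os.getenv("PERPLEXITYAI_API_KEY")
    return custom_llm_provider, dynamic_api_key

os.environ["PERPLEXITYAI_API_KEY"] = "pplx-example"  # placeholder value
print(detect_from_api_base("https://api.perplexity.ai"))  # ('custom_openai', 'pplx-example')
```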
@@ -1479,7 +1480,7 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None):
         print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m")
         print()
         raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
-        return model, custom_llm_provider
+        return model, custom_llm_provider, dynamic_api_key
     except Exception as e:
         raise e
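Taken together, the intended call pattern after this commit looks roughly like the following; the model name is illustrative and a real PERPLEXITYAI_API_KEY must be exported first:

```python
import os
import litellm

# Assumes a real key is set in the environment, e.g. in the shell:
#   export PERPLEXITYAI_API_KEY="pplx-..."
assert os.getenv("PERPLEXITYAI_API_KEY"), "set PERPLEXITYAI_API_KEY first"

# Pointing api_base at a known OpenAI-compatible endpoint is now enough:
# the key is read from the environment for this call only, without
# touching litellm.api_key.
response = litellm.completion(
    model="mistral-7b-instruct",  # illustrative perplexity-hosted model
    messages=[{"role": "user", "content": "Hello"}],
    api_base="https://api.perplexity.ai",
)
print(response)
```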