Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 10:14:26 +00:00)
fix(utils.py): support reading api keys dynamically from the os environment
parent 87aa36a2ec
commit 4f183dc6a0
3 changed files with 18 additions and 13 deletions
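In short, the change lets callers reference an API key by environment-variable name, using an api_key of the form "os.environ/<VAR_NAME>", which get_llm_provider() resolves with os.getenv() at call time. A minimal usage sketch; the variable name AZURE_API_KEY_NA comes from the new test below, while the dummy value and the assumption that Azure endpoint settings (api_base, api_version) are already configured in the environment are ours:

    import os
    import litellm

    # Illustrative only: in practice the key is exported before the process starts,
    # e.g. export AZURE_API_KEY_NA="sk-..."
    os.environ.setdefault("AZURE_API_KEY_NA", "my-azure-key")

    # Reference the variable with the "os.environ/" prefix instead of pasting the secret;
    # litellm looks it up with os.getenv() when routing the call.
    response = litellm.completion(
        model="azure/chatgpt-v-2",
        messages=[{"role": "user", "content": "Hello, world"}],
        api_key="os.environ/AZURE_API_KEY_NA",
    )
    print(response)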
litellm/main.py
@@ -371,7 +371,7 @@ def completion(
     if deployment_id != None: # azure llms
         model=deployment_id
         custom_llm_provider="azure"
-    model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
+    model, custom_llm_provider, api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key)
     custom_prompt_dict = {} # type: ignore
     if initial_prompt_value or roles or final_prompt_value or bos_token or eos_token:
         custom_prompt_dict = {model: {}}
litellm/main.py
@@ -1709,7 +1709,7 @@ def embedding(
     - exception_type: If an exception occurs during the API call.
     """
     azure = kwargs.get("azure", None)
-    model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base)
+    model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider(model=model, custom_llm_provider=custom_llm_provider, api_base=api_base, api_key=api_key)
     try:
         response = None
         logging = litellm_logging_obj
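embedding() forwards api_key into get_llm_provider() the same way, though here the resolved value lands in dynamic_api_key rather than replacing api_key. Assuming embedding() passes the resolved key through to the provider call (not visible in this hunk), usage would mirror completion(); the model and environment-variable names below are illustrative:

    import litellm

    # Assumption: embedding() accepts api_key and honors the "os.environ/" prefix
    # the same way completion() does after this change.
    response = litellm.embedding(
        model="text-embedding-ada-002",
        input=["good morning from litellm"],
        api_key="os.environ/OPENAI_API_KEY",
    )
    print(response)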
litellm/tests/test_completion.py
@@ -183,7 +183,7 @@ def test_completion_perplexity_api():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-test_completion_perplexity_api()
+# test_completion_perplexity_api()

 def test_completion_perplexity_api_2():
     try:
litellm/tests/test_completion.py
@@ -582,20 +582,22 @@ def test_completion_azure():
         response = completion(
             model="azure/chatgpt-v-2",
             messages=messages,
+            api_key="os.environ/AZURE_API_KEY_NA"
         )
+        print(f"response: {response}")
         ## Test azure flag for backwards compatibility
-        response = completion(
-            model="chatgpt-v-2",
-            messages=messages,
-            azure=True,
-            max_tokens=10
-        )
+        # response = completion(
+        #     model="chatgpt-v-2",
+        #     messages=messages,
+        #     azure=True,
+        #     max_tokens=10
+        # )
         # Add any assertions here to check the response
         print(response)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-# test_completion_azure()
+test_completion_azure()

 def test_azure_openai_ad_token():
     # this tests if the azure ad token is set in the request header
litellm/tests/test_completion.py
@@ -833,7 +835,7 @@ def test_completion_replicate_llama2_stream():
         print(f"complete_response: {complete_response}")
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_replicate_llama2_stream()
+# test_completion_replicate_llama2_stream()

 # commenthing this out since we won't be always testing a custom replicate deployment
 # def test_completion_replicate_deployments():
litellm/utils.py
@@ -2294,14 +2294,17 @@ def get_optional_params( # use the openai defaults
             optional_params[k] = passed_params[k]
     return optional_params

-def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None):
+def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None, api_base: Optional[str] = None, api_key: Optional[str] = None):
     try:
         dynamic_api_key = None
         # check if llm provider provided

         if custom_llm_provider:
             return model, custom_llm_provider, dynamic_api_key, api_base

+        if api_key and api_key.startswith("os.environ/"):
+            api_key_env_name = api_key.replace("os.environ/", "")
+            dynamic_api_key = os.getenv(api_key_env_name)
         # check if llm provider part of model name
         if model.split("/",1)[0] in litellm.provider_list and model.split("/",1)[0] not in litellm.model_list:
             custom_llm_provider = model.split("/", 1)[0]
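For clarity, the resolution step added to get_llm_provider() boils down to a prefix check plus an os.getenv() lookup. A standalone sketch of that logic, isolated from the rest of the function; the helper name resolve_api_key is ours, not part of the commit:

    import os
    from typing import Optional

    def resolve_api_key(api_key: Optional[str]) -> Optional[str]:
        """Mirrors the added lines: turn "os.environ/MY_VAR" into os.getenv("MY_VAR")."""
        dynamic_api_key = None
        if api_key and api_key.startswith("os.environ/"):
            api_key_env_name = api_key.replace("os.environ/", "")
            dynamic_api_key = os.getenv(api_key_env_name)
        return dynamic_api_key

    # Example: returns the value of AZURE_API_KEY_NA if it is set, else None.
    os.environ["AZURE_API_KEY_NA"] = "my-azure-key"
    print(resolve_api_key("os.environ/AZURE_API_KEY_NA"))  # -> "my-azure-key"
    print(resolve_api_key("sk-raw-key"))                   # -> None; keys without the prefix are left for the caller to use as-is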