forked from phoenix/litellm-mirror
fix llama index validate environment error
parent 9ee51872e6
commit 076affbff2
4 changed files with 15 additions and 3 deletions
Binary file not shown.
@@ -10,7 +10,7 @@ sys.path.insert(
 ) # Adds the parent directory to the system path
 import pytest
 import litellm
-from litellm.utils import trim_messages, get_token_count, get_valid_models, check_valid_key
+from litellm.utils import trim_messages, get_token_count, get_valid_models, check_valid_key, validate_environment

 # Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'

@@ -93,3 +93,12 @@ def test_good_key():
     key = os.environ['OPENAI_API_KEY']
     response = check_valid_key(model="gpt-3.5-turbo", api_key=key)
     assert(response == True)
+
+# test validate environment
+
+def test_validate_environment_empty_model():
+    api_key = validate_environment()
+    if api_key is None:
+        raise Exception()
+
+# test_validate_environment_empty_model()
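The new test only checks that validate_environment() returns something non-None when called with no arguments. A slightly more explicit pytest-style variant (hypothetical, not part of this commit) would assert on the shape of the returned dict instead:

# Hypothetical, more explicit version of the new test (not part of this commit).
from litellm.utils import validate_environment

def test_validate_environment_empty_model_explicit():
    result = validate_environment()
    # With no model, the function short-circuits and reports no missing keys.
    assert result == {"keys_in_environment": False, "missing_keys": []}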
@@ -1325,9 +1325,12 @@ def load_test_model(
             "exception": e,
         }

-def validate_environment(model: str) -> dict:
+def validate_environment(model: Optional[str]=None) -> dict:
     keys_in_environment = False
     missing_keys = []
+
+    if model is None:
+        return {"keys_in_environment": keys_in_environment, "missing_keys": missing_keys}
     ## EXTRACT LLM PROVIDER - if model name provided
     custom_llm_provider = None
     # check if llm provider part of model name
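This is the substantive fix: validate_environment previously required a model argument, so a caller (presumably the llama-index integration named in the commit title) invoking it with no arguments would fail; with the new Optional[str]=None signature, a bare call short-circuits and returns an empty report. A minimal usage sketch of the patched behavior, based only on the hunk above:

# Minimal sketch of the patched behavior (assumes only what the hunk shows).
from litellm.utils import validate_environment

# Calling with no model no longer fails on a missing positional argument;
# it returns the default (empty) report immediately.
report = validate_environment()
assert report == {"keys_in_environment": False, "missing_keys": []}

# Passing a model name still runs the provider-extraction / key-check logic
# that continues past this hunk, e.g.:
# validate_environment(model="gpt-3.5-turbo")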
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.772"
+version = "0.1.773"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"