forked from phoenix/litellm-mirror
with verbose secretmanager
parent 41535ec314
commit 068930f81a

4 changed files with 6 additions and 15 deletions
@@ -93,6 +93,7 @@ def completion(
         openai.organization = litellm.organization
         # set API KEY
         openai.api_key = api_key or litellm.openai_key or get_secret("OPENAI_API_KEY")
+        print(f"value of set openai key {openai.api_key}")

         ## LOGGING
         logging(model=model, input=messages, additional_args=args, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
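The key-resolution line in the hunk above works left to right: an explicit api_key argument wins, then the module-level litellm.openai_key, then whatever get_secret("OPENAI_API_KEY") returns. A minimal sketch of that order, assuming get_secret simply falls back to the environment when no secret manager is configured (resolve_openai_key is an illustrative name, not part of litellm):

import os

def resolve_openai_key(api_key=None, module_level_key=None):
    # Stand-in for get_secret("OPENAI_API_KEY") in the simplest case:
    # no secret manager configured, so the key comes from the environment.
    env_key = os.getenv("OPENAI_API_KEY")
    # Same short-circuiting `or` chain as the diff line above.
    return api_key or module_level_key or env_key

# The print() added by this commit logs whichever value won.
print(f"value of set openai key {resolve_openai_key(api_key='sk-example')}")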
@@ -17,13 +17,6 @@ litellm.secret_manager_client = InfisicalClient(token=infisical_token)
 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]

-def test_completion_azure():
-    try:
-        response = completion(model="gpt-3.5-turbo", deployment_id="chatgpt-test", messages=messages, custom_llm_provider="azure")
-        # Add any assertions here to check the response
-        print(response)
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")

 def test_completion_openai():
     try:
@@ -33,10 +26,5 @@ def test_completion_openai():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")

-def test_completion_openai_with_optional_params():
-    try:
-        response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0.5, top_p=0.1, user="ishaan_dev@berri.ai")
-        # Add any assertions here to check the response
-        print(response)
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
+test_completion_openai()
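After these two hunks the test file keeps only the OpenAI test and calls it at module level, so running the file as a plain script exercises the secret-manager path in addition to pytest collection. A rough sketch of the remaining file under that reading; the body of test_completion_openai and the source of infisical_token are not shown in the diff and are assumed here:

import os
import pytest
import litellm
from litellm import completion
from infisical import InfisicalClient

# Assumed: the real file defines infisical_token elsewhere; an env var stands in here.
infisical_token = os.environ.get("INFISICAL_TOKEN")
litellm.secret_manager_client = InfisicalClient(token=infisical_token)

user_message = "Hello, whats the weather in San Francisco??"
messages = [{ "content": user_message,"role": "user"}]

def test_completion_openai():
    try:
        # completion() resolves OPENAI_API_KEY via get_secret(), i.e. Infisical here.
        response = completion(model="gpt-3.5-turbo", messages=messages)
        print(response)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

# Module-level call added by this diff, so the script runs the test directly.
test_completion_openai()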
@@ -836,6 +836,8 @@ def get_secret(secret_name):
         secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
         if secret != None:
             return secret # if secret found in secret manager return it
+        else:
+            raise ValueError(f"Secret '{secret_name}' not found in secret manager")
     elif litellm.api_key != None: # if users use litellm default key
         return litellm.api_key
     else:
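Behaviourally, the two added lines make a configured secret manager authoritative: if the client is set but the secret is absent, get_secret now raises instead of silently falling through. A hedged sketch of the surrounding function; the outer guard on litellm.secret_manager_client and the final environment fallback are assumptions inferred from the context lines, not taken from the diff:

import os
import litellm

def get_secret_sketch(secret_name):
    if litellm.secret_manager_client != None:  # assumed guard around the hunk above
        secret = litellm.secret_manager_client.get_secret(secret_name).secret_value
        if secret != None:
            return secret  # secret found in the secret manager
        else:
            # New in this commit: missing secrets fail loudly instead of returning None.
            raise ValueError(f"Secret '{secret_name}' not found in secret manager")
    elif litellm.api_key != None:  # users relying on the litellm default key
        return litellm.api_key
    else:
        return os.environ.get(secret_name)  # assumed fallback to the environment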
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.415"
+version = "0.1.416"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"