Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)

commit fc10cf5eeb (parent e9899db545)

    exception_type work

4 changed files with 53 additions and 19 deletions
@@ -38,6 +38,7 @@ cache: Optional[Cache] = None # cache object
 model_alias_map: Dict[str, str] = {}
 max_budget: float = 0.0 # set the max budget across all providers
 _current_cost = 0 # private variable, used if max budget is set
+error_logs: Dict = {}
 #############################################

 def get_model_cost_map():
@@ -1046,7 +1046,7 @@ def completion(
     except Exception as e:
         ## Map to OpenAI Exception
         raise exception_type(
-            model=model, custom_llm_provider=custom_llm_provider, original_exception=e
+            model=model, custom_llm_provider=custom_llm_provider, original_exception=e, completion_kwargs=args,
         )
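The new completion_kwargs=args argument forwards the inputs of the failed call so exception_type() can log them next to the exception. Below is a minimal sketch of that pattern, not litellm's actual implementation; it assumes `args` is a locals() snapshot taken when completion() is entered (the capture point sits outside this hunk).

# Sketch only: a stub mapper plus a trimmed-down completion() showing how the
# call's arguments travel with the exception.
def exception_type(model, original_exception, custom_llm_provider, completion_kwargs={}):
    # record what was sent, then hand the exception back for re-raising
    print(f"call to {model} via {custom_llm_provider} failed with inputs: {completion_kwargs}")
    return original_exception

def completion(model, messages, custom_llm_provider=None, **kwargs):
    args = locals()  # assumption: snapshot of the call's inputs taken on entry
    try:
        raise RuntimeError("simulated provider failure")
    except Exception as e:
        ## Map to OpenAI Exception
        raise exception_type(
            model=model, custom_llm_provider=custom_llm_provider,
            original_exception=e, completion_kwargs=args,
        )

# usage: completion("gpt-3.5-turbo", [{"role": "user", "content": "hi"}])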
@@ -134,24 +134,24 @@ def test_completion_with_litellm_call_id():
 #     pytest.fail(f"Error occurred: {e}")

 # using Non TGI or conversational LLMs
-# def hf_test_completion():
-#     try:
-#         # litellm.set_verbose=True
-#         user_message = "My name is Merve and my favorite"
-#         messages = [{ "content": user_message,"role": "user"}]
-#         response = completion(
-#             model="huggingface/roneneldan/TinyStories-3M",
-#             messages=messages,
-#             api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
-#             task=None,
-#         )
-#         # Add any assertions here to check the response
-#         print(response)
-
-#     except Exception as e:
-#         pytest.fail(f"Error occurred: {e}")
-
-# hf_test_completion()
+def hf_test_completion():
+    try:
+        # litellm.set_verbose=True
+        user_message = "My name is Merve and my favorite"
+        messages = [{ "content": user_message,"role": "user"}]
+        response = completion(
+            model="huggingface/roneneldan/TinyStories-3M",
+            messages=messages,
+            api_base="https://p69xlsj6rpno5drq.us-east-1.aws.endpoints.huggingface.cloud",
+        )
+        # Add any assertions here to check the response
+        print(response)
+
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+hf_test_completion()


 def test_completion_cohere(): # commenting for now as the cohere endpoint is being flaky
     try:
@@ -352,7 +352,7 @@ def test_completion_azure():
     try:
         print("azure gpt-3.5 test\n\n")
         response = completion(
-            model="azure/chatgpt-v-2",
+            model="chatgpt-v-2",
             messages=messages,
         )
         # Add any assertions here to check the response
@@ -198,6 +198,7 @@ class Logging:
     def pre_call(self, input, api_key, model=None, additional_args={}):
         # Log the exact input to the LLM API
         print_verbose(f"Logging Details Pre-API Call for call id {self.litellm_call_id}")
+        litellm.error_logs['PRE_CALL'] = locals()
         try:
             # print_verbose(f"logging pre call for model: {self.model} with call type: {self.call_type}")
             self.model_call_details["input"] = input
@@ -280,6 +281,7 @@ class Logging:

     def post_call(self, original_response, input=None, api_key=None, additional_args={}):
         # Log the exact result from the LLM API, for streaming - log the type of response received
+        litellm.error_logs['POST_CALL'] = locals()
         try:
             self.model_call_details["input"] = input
             self.model_call_details["api_key"] = api_key
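Both hooks stash the raw locals() of the logging call under litellm.error_logs. Such a snapshot contains self and other objects that json.dumps cannot handle, which is why the serialization step added to exception_type() in the next hunk filters values by type. A standalone illustration of that point, not litellm code:

# Standalone sketch: a locals() snapshot mixes JSON-friendly values with
# objects (here, `self`) that json.dumps rejects, so it must be filtered.
import json

class Logging:
    def pre_call(self, input, api_key=None, model=None, additional_args={}):
        snapshot = locals()  # includes `self`, which is not JSON-serializable
        try:
            json.dumps(snapshot)
        except TypeError as err:
            print(f"raw locals() cannot be dumped: {err}")
        # keeping only primitive/JSON-friendly values makes the dump safe
        safe = {k: v for k, v in snapshot.items()
                if isinstance(v, (str, int, float, bool, list, dict))}
        print(json.dumps(safe))

Logging().pre_call(input=[{"role": "user", "content": "hi"}], model="gpt-3.5-turbo")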
@@ -1870,9 +1872,40 @@ def get_model_list():
     )

####### EXCEPTION MAPPING ################
-def exception_type(model, original_exception, custom_llm_provider):
+def exception_type(
+    model,
+    original_exception,
+    custom_llm_provider,
+    completion_kwargs={},
+):
     global user_logger_fn, liteDebuggerClient
     exception_mapping_worked = False
+
+    litellm.error_logs['EXCEPTION'] = original_exception
+    litellm.error_logs['KWARGS'] = completion_kwargs
+
+    import urllib.parse
+    import json
+    for log_key in litellm.error_logs:
+        current_logs = litellm.error_logs[log_key]
+        if type(current_logs) == dict:
+            filtered_error_logs = {key: value for key, value in current_logs.items() if isinstance(value, (str, int, float, bool, list, dict))}
+            litellm.error_logs[log_key] = filtered_error_logs
+        else:
+            litellm.error_logs[log_key] = str(current_logs)
+
+    # Convert the filtered_error_logs dictionary to a JSON string
+    error_logs_json = json.dumps(litellm.error_logs)
+    # URL-encode the JSON data
+    encoded_data = urllib.parse.quote(error_logs_json)
+    print(encoded_data)
+    # Print the encoded data (this is what you can include in a URL)
+    print("\033[91m" + str(litellm.error_logs) + "\033[0m")
+
+    decoded_data = urllib.parse.unquote(encoded_data)
+
+    # Print the decoded data
+    print(decoded_data)
     try:
         if isinstance(original_exception, OriginalError):
             # Handle the OpenAIError
|
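The serialization block added above boils down to three steps: filter each logged snapshot down to JSON-friendly values, json.dumps the whole error_logs dict, then URL-encode the result so it can be attached to a debugging URL (the decode is only a sanity check). A condensed, standalone version of that flow follows; the sample error_logs content is hypothetical.

# Condensed sketch of the filter / JSON-encode / URL-encode flow.
import json
import urllib.parse

error_logs = {
    "PRE_CALL": {"model": "gpt-3.5-turbo", "api_key": None, "client": object()},
    "EXCEPTION": ValueError("simulated provider error"),
}

for log_key in list(error_logs):
    current_logs = error_logs[log_key]
    if isinstance(current_logs, dict):
        # keep only values that json.dumps can handle
        error_logs[log_key] = {
            key: value for key, value in current_logs.items()
            if isinstance(value, (str, int, float, bool, list, dict))
        }
    else:
        # exceptions and other objects are stored as their string form
        error_logs[log_key] = str(current_logs)

error_logs_json = json.dumps(error_logs)             # serialize the filtered logs
encoded_data = urllib.parse.quote(error_logs_json)   # safe to embed in a URL
decoded_data = urllib.parse.unquote(encoded_data)    # round-trips back to the JSON

print(encoded_data)
print(decoded_data)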