fix(openai.py): fix linting issue

Krrish Dholakia 2024-01-22 18:20:15 -08:00 committed by ishaan-jaff
parent e4fda7c840
commit d57e57234e
8 changed files with 166 additions and 14 deletions

@@ -1064,13 +1064,21 @@ class Logging:
             self.model_call_details["log_event_type"] = "successful_api_call"
             self.model_call_details["end_time"] = end_time
             self.model_call_details["cache_hit"] = cache_hit
-            if result is not None and (
-                isinstance(result, ModelResponse)
-                or isinstance(result, EmbeddingResponse)
+            ## if model in model cost map - log the response cost
+            ## else set cost to None
+            if (
+                result is not None
+                and (
+                    isinstance(result, ModelResponse)
+                    or isinstance(result, EmbeddingResponse)
+                )
+                and result.model in litellm.model_cost
             ):
                 self.model_call_details["response_cost"] = litellm.completion_cost(
                     completion_response=result,
                 )
+            else:  # streaming chunks + image gen.
+                self.model_call_details["response_cost"] = None
             if litellm.max_budget and self.stream:
                 time_diff = (end_time - start_time).total_seconds()
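The net effect of this hunk is that litellm.completion_cost() is now only attempted when the result is a ModelResponse or EmbeddingResponse whose model actually appears in litellm.model_cost; streaming chunks, image generation results, and models missing from the cost map instead record response_cost = None. Below is a minimal, self-contained sketch of that guard. The toy cost map, the simplified ModelResponse stand-in, and the per-token pricing are illustrative assumptions only, not litellm's real data structures.

# Sketch of the guard added in this commit (assumed toy data, not litellm internals).
model_cost = {"gpt-3.5-turbo": {"output_cost_per_token": 0.000002}}  # assumed toy cost map


class ModelResponse:
    # Simplified stand-in for a priced response object.
    def __init__(self, model, total_tokens):
        self.model = model
        self.total_tokens = total_tokens


def compute_response_cost(result):
    # Only price responses of a known type whose model is listed in the cost map;
    # anything else (streaming chunks, image gen, unknown models) gets None.
    if (
        result is not None
        and isinstance(result, ModelResponse)
        and result.model in model_cost
    ):
        return result.total_tokens * model_cost[result.model]["output_cost_per_token"]
    return None


print(compute_response_cost(ModelResponse("gpt-3.5-turbo", 100)))  # 0.0002
print(compute_response_cost(ModelResponse("local-llama", 100)))    # None, not in cost map
print(compute_response_cost(None))                                  # None

With this pattern, callers logging custom or local models simply see response_cost = None rather than a cost-map lookup for a model that cannot be priced.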
@@ -1084,7 +1092,7 @@ class Logging:
            return start_time, end_time, result
        except Exception as e:
            print_verbose(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}")
            raise Exception(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}")

    def success_handler(
        self, result=None, start_time=None, end_time=None, cache_hit=None, **kwargs