forked from phoenix/litellm-mirror
feat(utils.py): emit response cost as part of logs
parent 36c6d3cd90
commit e917d0eee6
3 changed files with 15 additions and 34 deletions
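For context on what the commit wires up: the new `response_cost` value arrives in the `kwargs` dict handed to logging callbacks, alongside fields like `log_event_type` that the test hunks below assert on. A minimal sketch of a handler that reads it, assuming the `CustomLogger` base class from `litellm.integrations.custom_logger`; the `CostTracker` name and the print formatting are illustrative, not part of this commit:

```python
import litellm
from litellm.integrations.custom_logger import CustomLogger


class CostTracker(CustomLogger):
    """Illustrative handler: read the response_cost emitted with each log event."""

    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # response_cost may be None (e.g. unknown model pricing), so guard for it.
        cost = kwargs.get("response_cost")
        if cost is not None:
            print(f"response cost: ${cost:.6f}")

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        cost = kwargs.get("response_cost")
        if cost is not None:
            print(f"async response cost: ${cost:.6f}")


# Register the handler, then call completion as usual.
litellm.callbacks = [CostTracker()]
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
)
```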
@@ -170,6 +170,7 @@ class CompletionCustomHandler(
                )
                assert isinstance(kwargs["additional_args"], (dict, type(None)))
                assert isinstance(kwargs["log_event_type"], str)
                assert isinstance(kwargs["response_cost"], (float, type(None)))
            except:
                print(f"Assertion Error: {traceback.format_exc()}")
                self.errors.append(traceback.format_exc())
@@ -262,6 +263,7 @@ class CompletionCustomHandler(
                assert isinstance(kwargs["additional_args"], (dict, type(None)))
                assert isinstance(kwargs["log_event_type"], str)
                assert kwargs["cache_hit"] is None or isinstance(kwargs["cache_hit"], bool)
                assert isinstance(kwargs["response_cost"], (float, type(None)))
            except:
                print(f"Assertion Error: {traceback.format_exc()}")
                self.errors.append(traceback.format_exc())
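The assertions in these hunks run inside the callback itself, where a raised exception would be swallowed by the logging machinery rather than failing the test; that is why failures are printed and appended to `self.errors`. A sketch of how a test typically consumes that list afterwards, assuming it lives in the same module that defines `CompletionCustomHandler` (the test name, model, and sleep duration are illustrative):

```python
import asyncio

import litellm
import pytest


@pytest.mark.asyncio
async def test_response_cost_in_logs():  # illustrative name
    customHandler = CompletionCustomHandler()
    litellm.callbacks = [customHandler]

    await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
    )
    await asyncio.sleep(1)  # give async callbacks time to run

    # Failed assertions inside the callback do not raise here; they are
    # collected in customHandler.errors, so the test checks that it is empty.
    assert len(customHandler.errors) == 0
```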
@@ -545,8 +547,9 @@ async def test_async_chat_bedrock_stream():

# asyncio.run(test_async_chat_bedrock_stream())

# Text Completion

# Text Completion


## Test OpenAI text completion + Async
@pytest.mark.asyncio
async def test_async_text_completion_openai_stream():
@@ -585,6 +588,7 @@ async def test_async_text_completion_openai_stream():
    except Exception as e:
        pytest.fail(f"An exception occurred: {str(e)}")


# EMBEDDING
## Test OpenAI + Async
@pytest.mark.asyncio
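The remaining hunks extend the same `response_cost` check to the text-completion and embedding tests. A sketch of how a streaming text-completion call might drive the handler so those callbacks fire, assuming `litellm.atext_completion` with `stream=True` returns an async iterator; the model, prompt, and helper name are illustrative:

```python
import asyncio

import litellm


async def drive_text_completion_stream(handler):
    # Illustrative driver: stream a text completion so the handler's
    # success hooks fire with response_cost present in kwargs.
    litellm.callbacks = [handler]
    response = await litellm.atext_completion(
        model="gpt-3.5-turbo-instruct",
        prompt="Hello world",
        stream=True,
    )
    async for _chunk in response:
        pass  # consume the stream; callbacks run once it completes
    await asyncio.sleep(1)  # allow the async logger to finish
```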