mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
Include litellm call IDs in responses when use_client is True
This commit is contained in:
parent
bb766c34ca
commit
beac62b8fe
2 changed files with 25 additions and 0 deletions
|
@ -54,6 +54,26 @@ def test_completion_claude():
|
|||
except Exception as e:
|
||||
pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
def test_completion_with_litellm_call_id():
    """Check that 'litellm_call_id' is present in the completion response
    if and only if litellm.use_client is True."""
    try:
        # With use_client disabled, the call id must NOT leak into the response.
        litellm.use_client = False
        response = completion(
            model="gpt-3.5-turbo", messages=messages)
        print(response)
        if 'litellm_call_id' in response:
            # Plain string: no placeholders, so no f-prefix needed (ruff F541).
            pytest.fail("Error occurred: litellm_call_id in response objects")

        # With use_client enabled, the call id MUST be attached to the response.
        litellm.use_client = True
        response2 = completion(
            model="gpt-3.5-turbo", messages=messages)

        if 'litellm_call_id' not in response2:
            pytest.fail("Error occurred: litellm_call_id not in response object when use_client = True")
        # Add any assertions here to check the response
        print(response2)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
|
||||
|
||||
|
||||
def test_completion_claude_stream():
|
||||
try:
|
||||
|
|
|
@ -543,6 +543,11 @@ def client(original_function):
|
|||
# [OPTIONAL] ADD TO CACHE
|
||||
if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object
|
||||
litellm.cache.add_cache(result, *args, **kwargs)
|
||||
|
||||
# [OPTIONAL] Return LiteLLM call_id
|
||||
if litellm.use_client == True:
|
||||
result['litellm_call_id'] = litellm_call_id
|
||||
|
||||
# LOG SUCCESS
|
||||
my_thread = threading.Thread(
|
||||
target=handle_success, args=(args, kwargs, result, start_time, end_time)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue