diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index 93f7fedc8..7fd5b34f6 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -54,6 +54,26 @@ def test_completion_claude(): except Exception as e: pytest.fail(f"Error occurred: {e}") +def test_completion_with_litellm_call_id(): + try: + litellm.use_client = False + response = completion( + model="gpt-3.5-turbo", messages=messages) + print(response) + if 'litellm_call_id' in response: + pytest.fail(f"Error occurred: litellm_call_id in response object") + + litellm.use_client = True + response2 = completion( + model="gpt-3.5-turbo", messages=messages) + + if 'litellm_call_id' not in response2: + pytest.fail(f"Error occurred: litellm_call_id not in response object when use_client = True") + # Add any assertions here to check the response + print(response2) + except Exception as e: + pytest.fail(f"Error occurred: {e}") + def test_completion_claude_stream(): try: diff --git a/litellm/utils.py b/litellm/utils.py index 7ba300aa3..bbbec6bf8 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -543,6 +543,11 @@ def client(original_function): # [OPTIONAL] ADD TO CACHE if litellm.caching or litellm.caching_with_models or litellm.cache != None: # user init a cache object litellm.cache.add_cache(result, *args, **kwargs) + + # [OPTIONAL] Return LiteLLM call_id + if litellm.use_client == True: + result['litellm_call_id'] = litellm_call_id + # LOG SUCCESS my_thread = threading.Thread( target=handle_success, args=(args, kwargs, result, start_time, end_time)