diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index a752c8f58..b2edcbec9 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/test_litedebugger_integration.py b/litellm/tests/test_litedebugger_integration.py
index 3eca24361..90e7e5bdc 100644
--- a/litellm/tests/test_litedebugger_integration.py
+++ b/litellm/tests/test_litedebugger_integration.py
@@ -22,7 +22,7 @@ response = completion(model="claude-instant-1", messages=[{"role": "user", "cont
 # print(f"response: {response}")
 
 # # Test 2: On embedding call
-# response = embedding(model="text-embedding-ada-002", input=["sample text"])
+response = embedding(model="text-embedding-ada-002", input=["sample text"])
 # print(f"response: {response}")
 
 # # Test 3: On streaming completion call
diff --git a/litellm/utils.py b/litellm/utils.py
index 3ef075a18..0cc826bfe 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1627,7 +1627,7 @@ class CustomStreamWrapper:
         chunk = next(self.completion_stream)
         completion_obj['content'] = chunk['choices']['delta']
 
         # LOGGING
-        self.logging_obj(completion_obj["content"])
+        self.logging_obj.post_call(completion_obj["content"])
         # return this for all models
         return {"choices": [{"delta": completion_obj}]}
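
Note: the utils.py change routes streamed chunk content through logging_obj.post_call(...) instead of calling the logging object directly. A minimal sketch of exercising that streaming path, assuming litellm's completion API with stream=True; the model and prompt are placeholders taken from the test file above:

import litellm

# Each streamed chunk is produced by CustomStreamWrapper.__next__, which
# now logs the delta text via logging_obj.post_call before yielding
# {"choices": [{"delta": ...}]}.
response = litellm.completion(
    model="claude-instant-1",
    messages=[{"role": "user", "content": "Hey"}],
    stream=True,
)
for chunk in response:
    print(chunk["choices"][0]["delta"])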