diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index 983ec3942..e7b2f5e0b 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -32,6 +32,12 @@ class LangFuseLogger:
         self.langfuse_host = langfuse_host or os.getenv(
             "LANGFUSE_HOST", "https://cloud.langfuse.com"
         )
+        if not (
+            self.langfuse_host.startswith("http://")
+            or self.langfuse_host.startswith("https://")
+        ):
+            # add http:// if unset, assume communicating over private network - e.g. render
+            self.langfuse_host = "http://" + self.langfuse_host
         self.langfuse_release = os.getenv("LANGFUSE_RELEASE")
         self.langfuse_debug = os.getenv("LANGFUSE_DEBUG")
diff --git a/litellm/proxy/hooks/presidio_pii_masking.py b/litellm/proxy/hooks/presidio_pii_masking.py
index 609fbc7d4..207d024e9 100644
--- a/litellm/proxy/hooks/presidio_pii_masking.py
+++ b/litellm/proxy/hooks/presidio_pii_masking.py
@@ -70,15 +70,16 @@ class _OPTIONAL_PresidioPIIMasking(CustomLogger):
         ) # type: ignore
         self.presidio_anonymizer_api_base: Optional[str] = litellm.get_secret(
             "PRESIDIO_ANONYMIZER_API_BASE", None
-        )
+        ) # type: ignore
         if self.presidio_analyzer_api_base is None:
             raise Exception("Missing `PRESIDIO_ANALYZER_API_BASE` from environment")
         if not self.presidio_analyzer_api_base.endswith("/"):
             self.presidio_analyzer_api_base += "/"
-        if not self.presidio_analyzer_api_base.startswith(
-            "http://"
-        ) or self.presidio_analyzer_api_base.startswith("https://"):
+        if not (
+            self.presidio_analyzer_api_base.startswith("http://")
+            or self.presidio_analyzer_api_base.startswith("https://")
+        ):
             # add http:// if unset, assume communicating over private network - e.g. render
             self.presidio_analyzer_api_base = (
                 "http://" + self.presidio_analyzer_api_base
@@ -88,9 +89,10 @@ class _OPTIONAL_PresidioPIIMasking(CustomLogger):
             raise Exception("Missing `PRESIDIO_ANONYMIZER_API_BASE` from environment")
         if not self.presidio_anonymizer_api_base.endswith("/"):
             self.presidio_anonymizer_api_base += "/"
-        if not self.presidio_anonymizer_api_base.startswith(
-            "http://"
-        ) or self.presidio_anonymizer_api_base.startswith("https://"):
+        if not (
+            self.presidio_anonymizer_api_base.startswith("http://")
+            or self.presidio_anonymizer_api_base.startswith("https://")
+        ):
             # add http:// if unset, assume communicating over private network - e.g. render
             self.presidio_anonymizer_api_base = (
                 "http://" + self.presidio_anonymizer_api_base
diff --git a/litellm/tests/langfuse.log b/litellm/tests/langfuse.log
index 1921f3136..3d38395ab 100644
--- a/litellm/tests/langfuse.log
+++ b/litellm/tests/langfuse.log
@@ -8,47 +8,46 @@
 `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs.
 `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs.
 `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs.
-Creating trace id='52a58bac-492b-433e-9228-2759b73303a6' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 45, 565911, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None -Creating trace id='28bc21fe-5955-4ec5-ba39-27325718af5a' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 45, 566213, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None -Creating generation trace_id='52a58bac-492b-433e-9228-2759b73303a6' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 561383) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-45-561383_chatcmpl-193fd5b6-87ce-4b8f-90bb-e2c2608f0f73' end_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 564028) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 564028) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... -Creating generation trace_id='28bc21fe-5955-4ec5-ba39-27325718af5a' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 562146) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-45-562146_chatcmpl-2dc26df5-d4e4-46f5-868e-138aac85dd95' end_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 564312) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 564312) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... 
+Creating trace id='22019f81-81fc-4b65-8ab3-e141f6625ac9' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 49, 2109, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating generation trace_id='22019f81-81fc-4b65-8ab3-e141f6625ac9' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 48, 998308) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-48-998308_chatcmpl-4fc2302c-b7e4-4703-b716-cd01a3d0b322' end_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 719) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 719) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... item size 459 -Creating trace id='f545a5c8-dfdf-4226-a30c-f24ff8d75144' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 45, 567765, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None -item size 459 -Creating trace id='c8d266ca-c370-439e-9d14-f011e5cfa254' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 45, 568137, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None -Creating generation trace_id='f545a5c8-dfdf-4226-a30c-f24ff8d75144' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 562753) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-45-562753_chatcmpl-33ae3e6d-d66a-4447-82d9-c8f5d5be43e5' end_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 564869) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 564869) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... 
-item size 887 -Creating generation trace_id='c8d266ca-c370-439e-9d14-f011e5cfa254' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 563300) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-45-563300_chatcmpl-56c11246-4c9c-43c0-bb4e-0be309907acd' end_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 565142) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 565142) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... item size 887 +Creating trace id='4e3773e6-037a-43e5-825d-b712babc0daf' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 49, 3537, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating trace id='bf11b810-c475-4a96-b0ec-a581321a44af' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 49, 3618, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating generation trace_id='bf11b810-c475-4a96-b0ec-a581321a44af' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 48, 998866) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-48-998866_chatcmpl-71dc0d36-7dfa-48af-ba45-613c2a31a6ae' end_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 1253) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 1253) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... 
item size 459 +Creating generation trace_id='4e3773e6-037a-43e5-825d-b712babc0daf' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 48, 999544) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-48-999544_chatcmpl-d61b3ab9-3b9d-4913-afa5-e84420c90ac5' end_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 1850) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 1850) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... item size 459 item size 887 item size 887 -Creating trace id='7c6fec55-def1-4838-8ea1-86960a1ccb19' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 45, 570331, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None -Creating generation trace_id='7c6fec55-def1-4838-8ea1-86960a1ccb19' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 563792) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-45-563792_chatcmpl-c159069a-bc65-43a0-bef5-e2d42688cead' end_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 569384) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 45, 569384) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... 
+Creating trace id='4a8fa429-f08d-4f6b-b3ca-9a404d7c61fa' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 49, 5826, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating generation trace_id='4a8fa429-f08d-4f6b-b3ca-9a404d7c61fa' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 48, 999955) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-48-999955_chatcmpl-f1c470f6-2f80-4d4a-89a4-4a013bae4085' end_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 5114) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 5114) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... item size 459 item size 887 +Creating trace id='8da0e9ce-8b13-4b4d-9f0d-daa3d5e0afc0' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 49, 6797, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating generation trace_id='8da0e9ce-8b13-4b4d-9f0d-daa3d5e0afc0' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 372) metadata={'litellm_response_cost': None, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': "It's simple to use and easy to get started", 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-49-000372_chatcmpl-f2672341-93c9-4fec-bccb-665a1df53ce5' end_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 5904) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 49, 5904) model='chatgpt-v-2' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=None) prompt_name=None prompt_version=None... +item size 459 +item size 887 +`litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs. 
~0 items in the Langfuse queue uploading batch of 10 items -uploading data: {'batch': [{'id': 'cd6c78ba-81aa-4106-bc92-48adbda0ef1b', 'type': 'trace-create', 'body': {'id': '52a58bac-492b-433e-9228-2759b73303a6', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 565911, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 566569, tzinfo=datetime.timezone.utc)}, {'id': '57b678c1-d620-4aad-8052-1722a498972e', 'type': 'trace-create', 'body': {'id': '28bc21fe-5955-4ec5-ba39-27325718af5a', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 566213, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 566947, tzinfo=datetime.timezone.utc)}, {'id': '831370be-b2bd-48d8-b32b-bfcaf103712b', 'type': 'generation-create', 'body': {'traceId': '52a58bac-492b-433e-9228-2759b73303a6', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 561383), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-23-26-45-561383_chatcmpl-193fd5b6-87ce-4b8f-90bb-e2c2608f0f73', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 564028), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 564028), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 567294, tzinfo=datetime.timezone.utc)}, {'id': '571fe93d-34b4-405e-98b4-e47b538b884a', 'type': 'generation-create', 'body': {'traceId': '28bc21fe-5955-4ec5-ba39-27325718af5a', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 562146), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-23-26-45-562146_chatcmpl-2dc26df5-d4e4-46f5-868e-138aac85dd95', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 564312), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 564312), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 567688, tzinfo=datetime.timezone.utc)}, {'id': '13ae52b9-7480-4b2e-977c-e85f422f9a16', 'type': 'trace-create', 'body': {'id': 'f545a5c8-dfdf-4226-a30c-f24ff8d75144', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 567765, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use 
and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 568357, tzinfo=datetime.timezone.utc)}, {'id': '7498e67e-0b2b-451c-8533-a35de0aed092', 'type': 'trace-create', 'body': {'id': 'c8d266ca-c370-439e-9d14-f011e5cfa254', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 568137, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 568812, tzinfo=datetime.timezone.utc)}, {'id': '2656f364-b367-442a-a694-19dd159a0769', 'type': 'generation-create', 'body': {'traceId': 'f545a5c8-dfdf-4226-a30c-f24ff8d75144', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 562753), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-23-26-45-562753_chatcmpl-33ae3e6d-d66a-4447-82d9-c8f5d5be43e5', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 564869), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 564869), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 569165, tzinfo=datetime.timezone.utc)}, {'id': '8c42f89e-be59-4226-812e-bc849d35ab59', 'type': 'generation-create', 'body': {'traceId': 'c8d266ca-c370-439e-9d14-f011e5cfa254', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 563300), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-23-26-45-563300_chatcmpl-56c11246-4c9c-43c0-bb4e-0be309907acd', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 565142), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 565142), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 569494, tzinfo=datetime.timezone.utc)}, {'id': 'a926d1eb-68ed-484c-a9b9-3d82938a7d28', 'type': 'trace-create', 'body': {'id': '7c6fec55-def1-4838-8ea1-86960a1ccb19', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 570331, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 570495, tzinfo=datetime.timezone.utc)}, {'id': '97b5dee7-a3b2-4526-91cb-75dac909c78f', 'type': 'generation-create', 'body': {'traceId': '7c6fec55-def1-4838-8ea1-86960a1ccb19', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 563792), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 
'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-23-26-45-563792_chatcmpl-c159069a-bc65-43a0-bef5-e2d42688cead', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 569384), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 45, 569384), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 45, 570858, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 10, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}} -making request: {"batch": [{"id": "cd6c78ba-81aa-4106-bc92-48adbda0ef1b", "type": "trace-create", "body": {"id": "52a58bac-492b-433e-9228-2759b73303a6", "timestamp": "2024-06-23T06:26:45.565911Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:45.566569Z"}, {"id": "57b678c1-d620-4aad-8052-1722a498972e", "type": "trace-create", "body": {"id": "28bc21fe-5955-4ec5-ba39-27325718af5a", "timestamp": "2024-06-23T06:26:45.566213Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:45.566947Z"}, {"id": "831370be-b2bd-48d8-b32b-bfcaf103712b", "type": "generation-create", "body": {"traceId": "52a58bac-492b-433e-9228-2759b73303a6", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:45.561383-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-45-561383_chatcmpl-193fd5b6-87ce-4b8f-90bb-e2c2608f0f73", "endTime": "2024-06-22T23:26:45.564028-07:00", "completionStartTime": "2024-06-22T23:26:45.564028-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-06-23T06:26:45.567294Z"}, {"id": "571fe93d-34b4-405e-98b4-e47b538b884a", "type": "generation-create", "body": {"traceId": "28bc21fe-5955-4ec5-ba39-27325718af5a", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:45.562146-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-45-562146_chatcmpl-2dc26df5-d4e4-46f5-868e-138aac85dd95", "endTime": "2024-06-22T23:26:45.564312-07:00", "completionStartTime": "2024-06-22T23:26:45.564312-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-06-23T06:26:45.567688Z"}, {"id": 
"13ae52b9-7480-4b2e-977c-e85f422f9a16", "type": "trace-create", "body": {"id": "f545a5c8-dfdf-4226-a30c-f24ff8d75144", "timestamp": "2024-06-23T06:26:45.567765Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:45.568357Z"}, {"id": "7498e67e-0b2b-451c-8533-a35de0aed092", "type": "trace-create", "body": {"id": "c8d266ca-c370-439e-9d14-f011e5cfa254", "timestamp": "2024-06-23T06:26:45.568137Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:45.568812Z"}, {"id": "2656f364-b367-442a-a694-19dd159a0769", "type": "generation-create", "body": {"traceId": "f545a5c8-dfdf-4226-a30c-f24ff8d75144", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:45.562753-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-45-562753_chatcmpl-33ae3e6d-d66a-4447-82d9-c8f5d5be43e5", "endTime": "2024-06-22T23:26:45.564869-07:00", "completionStartTime": "2024-06-22T23:26:45.564869-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-06-23T06:26:45.569165Z"}, {"id": "8c42f89e-be59-4226-812e-bc849d35ab59", "type": "generation-create", "body": {"traceId": "c8d266ca-c370-439e-9d14-f011e5cfa254", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:45.563300-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-45-563300_chatcmpl-56c11246-4c9c-43c0-bb4e-0be309907acd", "endTime": "2024-06-22T23:26:45.565142-07:00", "completionStartTime": "2024-06-22T23:26:45.565142-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-06-23T06:26:45.569494Z"}, {"id": "a926d1eb-68ed-484c-a9b9-3d82938a7d28", "type": "trace-create", "body": {"id": "7c6fec55-def1-4838-8ea1-86960a1ccb19", "timestamp": "2024-06-23T06:26:45.570331Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:45.570495Z"}, {"id": "97b5dee7-a3b2-4526-91cb-75dac909c78f", "type": "generation-create", "body": {"traceId": "7c6fec55-def1-4838-8ea1-86960a1ccb19", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:45.563792-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and 
easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-45-563792_chatcmpl-c159069a-bc65-43a0-bef5-e2d42688cead", "endTime": "2024-06-22T23:26:45.569384-07:00", "completionStartTime": "2024-06-22T23:26:45.569384-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-06-23T06:26:45.570858Z"}], "metadata": {"batch_size": 10, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion -`litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs. -received response: {"errors":[],"successes":[{"id":"cd6c78ba-81aa-4106-bc92-48adbda0ef1b","status":201},{"id":"57b678c1-d620-4aad-8052-1722a498972e","status":201},{"id":"831370be-b2bd-48d8-b32b-bfcaf103712b","status":201},{"id":"571fe93d-34b4-405e-98b4-e47b538b884a","status":201},{"id":"13ae52b9-7480-4b2e-977c-e85f422f9a16","status":201},{"id":"7498e67e-0b2b-451c-8533-a35de0aed092","status":201},{"id":"2656f364-b367-442a-a694-19dd159a0769","status":201},{"id":"8c42f89e-be59-4226-812e-bc849d35ab59","status":201},{"id":"a926d1eb-68ed-484c-a9b9-3d82938a7d28","status":201},{"id":"97b5dee7-a3b2-4526-91cb-75dac909c78f","status":201}]} +uploading data: {'batch': [{'id': '8709b135-ff1a-454b-8e77-9c16c2ae590b', 'type': 'trace-create', 'body': {'id': '22019f81-81fc-4b65-8ab3-e141f6625ac9', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 2109, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 2578, tzinfo=datetime.timezone.utc)}, {'id': 'ffc73bd8-2f00-496e-8566-eaa4b67b1f2b', 'type': 'generation-create', 'body': {'traceId': '22019f81-81fc-4b65-8ab3-e141f6625ac9', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 48, 998308), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-17-49-48-998308_chatcmpl-4fc2302c-b7e4-4703-b716-cd01a3d0b322', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 719), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 719), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 2939, tzinfo=datetime.timezone.utc)}, {'id': '446c9edc-59e2-4805-946b-acdc25feb203', 'type': 'trace-create', 'body': {'id': 'bf11b810-c475-4a96-b0ec-a581321a44af', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 3618, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 3813, tzinfo=datetime.timezone.utc)}, 
{'id': '52a24782-ba7a-4bdf-9ae5-c79a5f4a8683', 'type': 'trace-create', 'body': {'id': '4e3773e6-037a-43e5-825d-b712babc0daf', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 3537, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 4113, tzinfo=datetime.timezone.utc)}, {'id': 'cce474cc-8881-4b76-8e33-64fcfb3333a5', 'type': 'generation-create', 'body': {'traceId': 'bf11b810-c475-4a96-b0ec-a581321a44af', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 48, 998866), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-17-49-48-998866_chatcmpl-71dc0d36-7dfa-48af-ba45-613c2a31a6ae', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 1253), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 1253), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 4357, tzinfo=datetime.timezone.utc)}, {'id': 'b688cd33-a978-459b-9bee-330c1ea3dc07', 'type': 'generation-create', 'body': {'traceId': '4e3773e6-037a-43e5-825d-b712babc0daf', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 48, 999544), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-17-49-48-999544_chatcmpl-d61b3ab9-3b9d-4913-afa5-e84420c90ac5', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 1850), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 1850), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 4708, tzinfo=datetime.timezone.utc)}, {'id': '9142c397-6a65-43a4-b4e2-4f24df372f46', 'type': 'trace-create', 'body': {'id': '4a8fa429-f08d-4f6b-b3ca-9a404d7c61fa', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 5826, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 6019, tzinfo=datetime.timezone.utc)}, {'id': '58990902-d78f-4e3c-ab93-806502bc273d', 'type': 'generation-create', 'body': {'traceId': '4a8fa429-f08d-4f6b-b3ca-9a404d7c61fa', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 48, 999955), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 
'time-17-49-48-999955_chatcmpl-f1c470f6-2f80-4d4a-89a4-4a013bae4085', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 5114), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 5114), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 6245, tzinfo=datetime.timezone.utc)}, {'id': 'f7dccb63-fb83-4523-81dd-dc218e482a0e', 'type': 'trace-create', 'body': {'id': '8da0e9ce-8b13-4b4d-9f0d-daa3d5e0afc0', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 6797, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 6918, tzinfo=datetime.timezone.utc)}, {'id': '89095577-def6-40c2-93f5-c073d3772dd8', 'type': 'generation-create', 'body': {'traceId': '8da0e9ce-8b13-4b4d-9f0d-daa3d5e0afc0', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 372), 'metadata': {'litellm_response_cost': None, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-17-49-49-000372_chatcmpl-f2672341-93c9-4fec-bccb-665a1df53ce5', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 5904), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 49, 5904), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 49, 7234, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 10, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}} +making request: {"batch": [{"id": "8709b135-ff1a-454b-8e77-9c16c2ae590b", "type": "trace-create", "body": {"id": "22019f81-81fc-4b65-8ab3-e141f6625ac9", "timestamp": "2024-07-07T00:49:49.002109Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:49.002578Z"}, {"id": "ffc73bd8-2f00-496e-8566-eaa4b67b1f2b", "type": "generation-create", "body": {"traceId": "22019f81-81fc-4b65-8ab3-e141f6625ac9", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:48.998308-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-48-998308_chatcmpl-4fc2302c-b7e4-4703-b716-cd01a3d0b322", "endTime": "2024-07-06T17:49:49.000719-07:00", "completionStartTime": "2024-07-06T17:49:49.000719-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-07-07T00:49:49.002939Z"}, {"id": 
"446c9edc-59e2-4805-946b-acdc25feb203", "type": "trace-create", "body": {"id": "bf11b810-c475-4a96-b0ec-a581321a44af", "timestamp": "2024-07-07T00:49:49.003618Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:49.003813Z"}, {"id": "52a24782-ba7a-4bdf-9ae5-c79a5f4a8683", "type": "trace-create", "body": {"id": "4e3773e6-037a-43e5-825d-b712babc0daf", "timestamp": "2024-07-07T00:49:49.003537Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:49.004113Z"}, {"id": "cce474cc-8881-4b76-8e33-64fcfb3333a5", "type": "generation-create", "body": {"traceId": "bf11b810-c475-4a96-b0ec-a581321a44af", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:48.998866-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-48-998866_chatcmpl-71dc0d36-7dfa-48af-ba45-613c2a31a6ae", "endTime": "2024-07-06T17:49:49.001253-07:00", "completionStartTime": "2024-07-06T17:49:49.001253-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-07-07T00:49:49.004357Z"}, {"id": "b688cd33-a978-459b-9bee-330c1ea3dc07", "type": "generation-create", "body": {"traceId": "4e3773e6-037a-43e5-825d-b712babc0daf", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:48.999544-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-48-999544_chatcmpl-d61b3ab9-3b9d-4913-afa5-e84420c90ac5", "endTime": "2024-07-06T17:49:49.001850-07:00", "completionStartTime": "2024-07-06T17:49:49.001850-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-07-07T00:49:49.004708Z"}, {"id": "9142c397-6a65-43a4-b4e2-4f24df372f46", "type": "trace-create", "body": {"id": "4a8fa429-f08d-4f6b-b3ca-9a404d7c61fa", "timestamp": "2024-07-07T00:49:49.005826Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:49.006019Z"}, {"id": "58990902-d78f-4e3c-ab93-806502bc273d", "type": "generation-create", "body": {"traceId": "4a8fa429-f08d-4f6b-b3ca-9a404d7c61fa", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:48.999955-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and 
easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-48-999955_chatcmpl-f1c470f6-2f80-4d4a-89a4-4a013bae4085", "endTime": "2024-07-06T17:49:49.005114-07:00", "completionStartTime": "2024-07-06T17:49:49.005114-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-07-07T00:49:49.006245Z"}, {"id": "f7dccb63-fb83-4523-81dd-dc218e482a0e", "type": "trace-create", "body": {"id": "8da0e9ce-8b13-4b4d-9f0d-daa3d5e0afc0", "timestamp": "2024-07-07T00:49:49.006797Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:49.006918Z"}, {"id": "89095577-def6-40c2-93f5-c073d3772dd8", "type": "generation-create", "body": {"traceId": "8da0e9ce-8b13-4b4d-9f0d-daa3d5e0afc0", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:49.000372-07:00", "metadata": {"litellm_response_cost": null, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-49-000372_chatcmpl-f2672341-93c9-4fec-bccb-665a1df53ce5", "endTime": "2024-07-06T17:49:49.005904-07:00", "completionStartTime": "2024-07-06T17:49:49.005904-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-07-07T00:49:49.007234Z"}], "metadata": {"batch_size": 10, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion +received response: {"errors":[],"successes":[{"id":"8709b135-ff1a-454b-8e77-9c16c2ae590b","status":201},{"id":"ffc73bd8-2f00-496e-8566-eaa4b67b1f2b","status":201},{"id":"446c9edc-59e2-4805-946b-acdc25feb203","status":201},{"id":"52a24782-ba7a-4bdf-9ae5-c79a5f4a8683","status":201},{"id":"cce474cc-8881-4b76-8e33-64fcfb3333a5","status":201},{"id":"b688cd33-a978-459b-9bee-330c1ea3dc07","status":201},{"id":"9142c397-6a65-43a4-b4e2-4f24df372f46","status":201},{"id":"58990902-d78f-4e3c-ab93-806502bc273d","status":201},{"id":"f7dccb63-fb83-4523-81dd-dc218e482a0e","status":201},{"id":"89095577-def6-40c2-93f5-c073d3772dd8","status":201}]} successfully uploaded batch of 10 items -~0 items in the Langfuse queue consumer is running... `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs. 
-Creating trace id='litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 47, 529980, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating trace id='litellm-test-86452fed-4136-4b1e-8083-803b21511bcc' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 50, 228138, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None +Creating generation trace_id='litellm-test-86452fed-4136-4b1e-8083-803b21511bcc' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 50, 227128) metadata={'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-50-227128_chatcmpl-14db8e8d-39a5-4b84-a761-572f1ee2c1dd' end_time=datetime.datetime(2024, 7, 6, 17, 49, 50, 227686) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 50, 227686) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None... flushing queue -Creating generation trace_id='litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 47, 528930) metadata={'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-47-528930_chatcmpl-811d9755-120c-4934-9efd-5ec08b8c41c6' end_time=datetime.datetime(2024, 6, 22, 23, 26, 47, 529521) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 47, 529521) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None... item size 454 successfully flushed about 0 items. 
 item size 956
 ~0 items in the Langfuse queue
 uploading batch of 2 items
-uploading data: {'batch': [{'id': '997346c5-9bb9-4789-9ba9-33893bc65ee3', 'type': 'trace-create', 'body': {'id': 'litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 47, 529980, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 47, 530178, tzinfo=datetime.timezone.utc)}, {'id': 'c1c856eb-0aad-4da1-b68c-b68295b847e1', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 47, 528930), 'metadata': {'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': , 'id': 'time-23-26-47-528930_chatcmpl-811d9755-120c-4934-9efd-5ec08b8c41c6', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 47, 529521), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 47, 529521), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 47, 530501, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
-making request: {"batch": [{"id": "997346c5-9bb9-4789-9ba9-33893bc65ee3", "type": "trace-create", "body": {"id": "litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf", "timestamp": "2024-06-23T06:26:47.529980Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:47.530178Z"}, {"id": "c1c856eb-0aad-4da1-b68c-b68295b847e1", "type": "generation-create", "body": {"traceId": "litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:47.528930-07:00", "metadata": {"litellm_response_cost": 5.4999999999999995e-05, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-47-528930_chatcmpl-811d9755-120c-4934-9efd-5ec08b8c41c6", "endTime": "2024-06-23T23:26:47.529521-07:00", "completionStartTime": "2024-06-22T23:26:47.529521-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "stream": false, "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-06-23T06:26:47.530501Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
-received response: {"errors":[],"successes":[{"id":"997346c5-9bb9-4789-9ba9-33893bc65ee3","status":201},{"id":"c1c856eb-0aad-4da1-b68c-b68295b847e1","status":201}]}
+uploading data: {'batch': [{'id': '1d38c06a-12d5-4488-b3d4-5f39621c85ed', 'type': 'trace-create', 'body': {'id': 'litellm-test-86452fed-4136-4b1e-8083-803b21511bcc', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 50, 228138, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 50, 228332, tzinfo=datetime.timezone.utc)}, {'id': 'ec542e6b-eee8-471b-bf44-8d89e06ac3f5', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-86452fed-4136-4b1e-8083-803b21511bcc', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 50, 227128), 'metadata': {'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': , 'id': 'time-17-49-50-227128_chatcmpl-14db8e8d-39a5-4b84-a761-572f1ee2c1dd', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 50, 227686), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 50, 227686), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 50, 228609, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
+making request: {"batch": [{"id": "1d38c06a-12d5-4488-b3d4-5f39621c85ed", "type": "trace-create", "body": {"id": "litellm-test-86452fed-4136-4b1e-8083-803b21511bcc", "timestamp": "2024-07-07T00:49:50.228138Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:50.228332Z"}, {"id": "ec542e6b-eee8-471b-bf44-8d89e06ac3f5", "type": "generation-create", "body": {"traceId": "litellm-test-86452fed-4136-4b1e-8083-803b21511bcc", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:50.227128-07:00", "metadata": {"litellm_response_cost": 5.4999999999999995e-05, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-50-227128_chatcmpl-14db8e8d-39a5-4b84-a761-572f1ee2c1dd", "endTime": "2024-07-06T17:49:50.227686-07:00", "completionStartTime": "2024-07-06T17:49:50.227686-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "stream": false, "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-07-07T00:49:50.228609Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
+received response: {"errors":[],"successes":[{"id":"1d38c06a-12d5-4488-b3d4-5f39621c85ed","status":201},{"id":"ec542e6b-eee8-471b-bf44-8d89e06ac3f5","status":201}]}
 successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
@@ -56,7 +55,7 @@ successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
-Getting observations... None, None, None, None, litellm-test-4d2a861a-39d1-451c-8187-c1bc8f5253bf, None, GENERATION
+Getting observations... None, None, None, None, litellm-test-86452fed-4136-4b1e-8083-803b21511bcc, None, GENERATION
 ~0 items in the Langfuse queue
 consumer is running...
 `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs.
@@ -64,17 +63,17 @@ flushing queue
 successfully flushed about 0 items.
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
-Creating trace id='litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 50, 95341, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None
-Creating generation trace_id='litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 49, 844949) metadata={'litellm_response_cost': 4.1e-05, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-49-844949_chatcmpl-61f43be5-fc8e-4d92-ad89-8080b51f60de' end_time=datetime.datetime(2024, 6, 22, 23, 26, 49, 855530) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 49, 846913) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=14, output=10, total=None, unit=, input_cost=None, output_cost=None, total_cost=4.1e-05) prompt_name=None prompt_version=None...
+Creating trace id='litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 52, 866371, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None
+Creating generation trace_id='litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 52, 561285) metadata={'litellm_response_cost': 4.1e-05, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]} output={'content': 'redacted-by-litellm', 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-52-561285_chatcmpl-8079666e-d514-45b8-829f-887b4ae98ec0' end_time=datetime.datetime(2024, 7, 6, 17, 49, 52, 572272) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 52, 564390) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=14, output=10, total=None, unit=, input_cost=None, output_cost=None, total_cost=4.1e-05) prompt_name=None prompt_version=None...
 item size 454
 item size 925
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 uploading batch of 2 items
-uploading data: {'batch': [{'id': '9bde426a-b7e9-480f-adc2-e1530b572882', 'type': 'trace-create', 'body': {'id': 'litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 50, 95341, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 50, 95711, tzinfo=datetime.timezone.utc)}, {'id': '77964887-be69-42b6-b903-8b01d37643ca', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 49, 844949), 'metadata': {'litellm_response_cost': 4.1e-05, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': , 'id': 'time-23-26-49-844949_chatcmpl-61f43be5-fc8e-4d92-ad89-8080b51f60de', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 49, 855530), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 49, 846913), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 14, 'output': 10, 'unit': , 'totalCost': 4.1e-05}}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 50, 96374, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
-making request: {"batch": [{"id": "9bde426a-b7e9-480f-adc2-e1530b572882", "type": "trace-create", "body": {"id": "litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996", "timestamp": "2024-06-23T06:26:50.095341Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:50.095711Z"}, {"id": "77964887-be69-42b6-b903-8b01d37643ca", "type": "generation-create", "body": {"traceId": "litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:49.844949-07:00", "metadata": {"litellm_response_cost": 4.1e-05, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-49-844949_chatcmpl-61f43be5-fc8e-4d92-ad89-8080b51f60de", "endTime": "2024-06-22T23:26:49.855530-07:00", "completionStartTime": "2024-06-22T23:26:49.846913-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "stream": true, "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 14, "output": 10, "unit": "TOKENS", "totalCost": 4.1e-05}}, "timestamp": "2024-06-23T06:26:50.096374Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
+uploading data: {'batch': [{'id': '892fa3ad-9191-4f19-b050-0badbf773160', 'type': 'trace-create', 'body': {'id': 'litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 52, 866371, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 52, 866977, tzinfo=datetime.timezone.utc)}, {'id': '6abdba0f-7dba-438d-99ac-1c1a9d481d14', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 52, 561285), 'metadata': {'litellm_response_cost': 4.1e-05, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'redacted-by-litellm'}]}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': , 'id': 'time-17-49-52-561285_chatcmpl-8079666e-d514-45b8-829f-887b4ae98ec0', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 52, 572272), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 52, 564390), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 14, 'output': 10, 'unit': , 'totalCost': 4.1e-05}}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 52, 867926, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
+making request: {"batch": [{"id": "892fa3ad-9191-4f19-b050-0badbf773160", "type": "trace-create", "body": {"id": "litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6", "timestamp": "2024-07-07T00:49:52.866371Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:52.866977Z"}, {"id": "6abdba0f-7dba-438d-99ac-1c1a9d481d14", "type": "generation-create", "body": {"traceId": "litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:52.561285-07:00", "metadata": {"litellm_response_cost": 4.1e-05, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "redacted-by-litellm"}]}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-52-561285_chatcmpl-8079666e-d514-45b8-829f-887b4ae98ec0", "endTime": "2024-07-06T17:49:52.572272-07:00", "completionStartTime": "2024-07-06T17:49:52.564390-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "stream": true, "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 14, "output": 10, "unit": "TOKENS", "totalCost": 4.1e-05}}, "timestamp": "2024-07-07T00:49:52.867926Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
 ~0 items in the Langfuse queue
-received response: {"errors":[],"successes":[{"id":"9bde426a-b7e9-480f-adc2-e1530b572882","status":201},{"id":"77964887-be69-42b6-b903-8b01d37643ca","status":201}]}
+received response: {"errors":[],"successes":[{"id":"892fa3ad-9191-4f19-b050-0badbf773160","status":201},{"id":"6abdba0f-7dba-438d-99ac-1c1a9d481d14","status":201}]}
 successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
@@ -82,27 +81,27 @@ successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
+Getting observations... None, None, None, None, litellm-test-8d866db0-0114-48a2-9f24-12a82a7b3db6, None, GENERATION
+~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
-Getting observations... None, None, None, None, litellm-test-a6ce08b7-2364-4efd-b030-7ee3a9ed6996, None, GENERATION
 ~0 items in the Langfuse queue
 consumer is running...
-~0 items in the Langfuse queue
 `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs.
-Creating trace id='litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 52, 198564, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input='redacted-by-litellm' output='redacted-by-litellm' session_id=None release=None version=None metadata=None tags=[] public=None
-Creating generation trace_id='litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 52, 197638) metadata={'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False} input='redacted-by-litellm' output='redacted-by-litellm' level= status_message=None parent_observation_id=None version=None id='time-23-26-52-197638_chatcmpl-089072da-028d-4425-ae6d-76e71d21df0d' end_time=datetime.datetime(2024, 6, 22, 23, 26, 52, 198243) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 52, 198243) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None...
+flushing queue
+successfully flushed about 0 items.
+Creating trace id='litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 54, 946118, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input='redacted-by-litellm' output='redacted-by-litellm' session_id=None release=None version=None metadata=None tags=[] public=None
+Creating generation trace_id='litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 54, 944317) metadata={'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False} input='redacted-by-litellm' output='redacted-by-litellm' level= status_message=None parent_observation_id=None version=None id='time-17-49-54-944317_chatcmpl-0ff349c9-b506-4d5e-a511-ca47ba2b55c2' end_time=datetime.datetime(2024, 7, 6, 17, 49, 54, 945181) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 54, 945181) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None...
 item size 375
 item size 860
-flushing queue
-successfully flushed about 0 items.
-~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 uploading batch of 2 items
-uploading data: {'batch': [{'id': 'a44cc9e3-8b12-4a3f-b8d5-f7a3949ac5c2', 'type': 'trace-create', 'body': {'id': 'litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 52, 198564, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': 'redacted-by-litellm', 'output': 'redacted-by-litellm', 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 52, 198832, tzinfo=datetime.timezone.utc)}, {'id': 'fceda986-a5a6-4e87-b7e6-bf208a2f7589', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 52, 197638), 'metadata': {'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False}, 'input': 'redacted-by-litellm', 'output': 'redacted-by-litellm', 'level': , 'id': 'time-23-26-52-197638_chatcmpl-089072da-028d-4425-ae6d-76e71d21df0d', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 52, 198243), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 52, 198243), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 52, 199379, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
-making request: {"batch": [{"id": "a44cc9e3-8b12-4a3f-b8d5-f7a3949ac5c2", "type": "trace-create", "body": {"id": "litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695", "timestamp": "2024-06-23T06:26:52.198564Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": "redacted-by-litellm", "output": "redacted-by-litellm", "tags": []}, "timestamp": "2024-06-23T06:26:52.198832Z"}, {"id": "fceda986-a5a6-4e87-b7e6-bf208a2f7589", "type": "generation-create", "body": {"traceId": "litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:52.197638-07:00", "metadata": {"litellm_response_cost": 5.4999999999999995e-05, "cache_hit": false}, "input": "redacted-by-litellm", "output": "redacted-by-litellm", "level": "DEFAULT", "id": "time-23-26-52-197638_chatcmpl-089072da-028d-4425-ae6d-76e71d21df0d", "endTime": "2024-06-22T23:26:52.198243-07:00", "completionStartTime": "2024-06-22T23:26:52.198243-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-06-23T06:26:52.199379Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
+uploading data: {'batch': [{'id': '00f5d689-e397-4ad8-b084-998a49084b84', 'type': 'trace-create', 'body': {'id': 'litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 54, 946118, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': 'redacted-by-litellm', 'output': 'redacted-by-litellm', 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 54, 946313, tzinfo=datetime.timezone.utc)}, {'id': 'c0dae082-ef3d-4884-8e9a-aede1cf07ff1', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 54, 944317), 'metadata': {'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False}, 'input': 'redacted-by-litellm', 'output': 'redacted-by-litellm', 'level': , 'id': 'time-17-49-54-944317_chatcmpl-0ff349c9-b506-4d5e-a511-ca47ba2b55c2', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 54, 945181), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 54, 945181), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 54, 946682, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
+making request: {"batch": [{"id": "00f5d689-e397-4ad8-b084-998a49084b84", "type": "trace-create", "body": {"id": "litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4", "timestamp": "2024-07-07T00:49:54.946118Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": "redacted-by-litellm", "output": "redacted-by-litellm", "tags": []}, "timestamp": "2024-07-07T00:49:54.946313Z"}, {"id": "c0dae082-ef3d-4884-8e9a-aede1cf07ff1", "type": "generation-create", "body": {"traceId": "litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:54.944317-07:00", "metadata": {"litellm_response_cost": 5.4999999999999995e-05, "cache_hit": false}, "input": "redacted-by-litellm", "output": "redacted-by-litellm", "level": "DEFAULT", "id": "time-17-49-54-944317_chatcmpl-0ff349c9-b506-4d5e-a511-ca47ba2b55c2", "endTime": "2024-07-06T17:49:54.945181-07:00", "completionStartTime": "2024-07-06T17:49:54.945181-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-07-07T00:49:54.946682Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
 ~0 items in the Langfuse queue
-received response: {"errors":[],"successes":[{"id":"a44cc9e3-8b12-4a3f-b8d5-f7a3949ac5c2","status":201},{"id":"fceda986-a5a6-4e87-b7e6-bf208a2f7589","status":201}]}
+~0 items in the Langfuse queue
+received response: {"errors":[],"successes":[{"id":"00f5d689-e397-4ad8-b084-998a49084b84","status":201},{"id":"c0dae082-ef3d-4884-8e9a-aede1cf07ff1","status":201}]}
 successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
@@ -113,28 +112,27 @@ successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
-Getting trace litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695
+Getting trace litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
-Getting observations... None, None, None, None, litellm-test-b3e968bf-c9cb-4f4d-a834-b0cba57e4695, None, GENERATION
 ~0 items in the Langfuse queue
+Getting observations... None, None, None, None, litellm-test-dbb12cf8-f570-4742-91b9-16c6f23a7be4, None, GENERATION
 `litellm.set_verbose` is deprecated. Please set `os.environ['LITELLM_LOG'] = 'DEBUG'` for debug logs.
-flushing queue
-Creating trace id='litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6' timestamp=datetime.datetime(2024, 6, 23, 6, 26, 54, 545241, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': 'This is a test response', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None
-successfully flushed about 0 items.
-Creating generation trace_id='litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6' name='litellm-acompletion' start_time=datetime.datetime(2024, 6, 22, 23, 26, 54, 540644) metadata={'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': 'This is a test response', 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-23-26-54-540644_chatcmpl-5c5777de-9eaf-4515-ad2c-b9a9cf2cfbe5' end_time=datetime.datetime(2024, 6, 22, 23, 26, 54, 543392) completion_start_time=datetime.datetime(2024, 6, 22, 23, 26, 54, 543392) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None...
+Creating trace id='litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb' timestamp=datetime.datetime(2024, 7, 7, 0, 49, 57, 252490, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id='langfuse_latency_test_user' input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': 'This is a test response', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=[] public=None
+Creating generation trace_id='litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb' name='litellm-acompletion' start_time=datetime.datetime(2024, 7, 6, 17, 49, 57, 251265) metadata={'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'This is a test'}]} output={'content': 'This is a test response', 'role': 'assistant'} level= status_message=None parent_observation_id=None version=None id='time-17-49-57-251265_chatcmpl-963c5062-9c99-4bd7-9bd2-7ff039f129e5' end_time=datetime.datetime(2024, 7, 6, 17, 49, 57, 251932) completion_start_time=datetime.datetime(2024, 7, 6, 17, 49, 57, 251932) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None...
 item size 453
+flushing queue
 item size 938
+successfully flushed about 0 items.
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 uploading batch of 2 items
-uploading data: {'batch': [{'id': '696d738d-b46a-418f-be31-049e9add4bd8', 'type': 'trace-create', 'body': {'id': 'litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6', 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 54, 545241, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': 'This is a test response', 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 54, 545804, tzinfo=datetime.timezone.utc)}, {'id': 'caf378b4-ae86-4a74-a7ac-2f9a83ed9d67', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 6, 22, 23, 26, 54, 540644), 'metadata': {'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': 'This is a test response', 'role': 'assistant'}, 'level': , 'id': 'time-23-26-54-540644_chatcmpl-5c5777de-9eaf-4515-ad2c-b9a9cf2cfbe5', 'endTime': datetime.datetime(2024, 6, 22, 23, 26, 54, 543392), 'completionStartTime': datetime.datetime(2024, 6, 22, 23, 26, 54, 543392), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 6, 23, 6, 26, 54, 547005, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
-making request: {"batch": [{"id": "696d738d-b46a-418f-be31-049e9add4bd8", "type": "trace-create", "body": {"id": "litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6", "timestamp": "2024-06-23T06:26:54.545241Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "This is a test response", "role": "assistant"}, "tags": []}, "timestamp": "2024-06-23T06:26:54.545804Z"}, {"id": "caf378b4-ae86-4a74-a7ac-2f9a83ed9d67", "type": "generation-create", "body": {"traceId": "litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6", "name": "litellm-acompletion", "startTime": "2024-06-22T23:26:54.540644-07:00", "metadata": {"litellm_response_cost": 5.4999999999999995e-05, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "This is a test response", "role": "assistant"}, "level": "DEFAULT", "id": "time-23-26-54-540644_chatcmpl-5c5777de-9eaf-4515-ad2c-b9a9cf2cfbe5", "endTime": "2024-06-22T23:26:54.543392-07:00", "completionStartTime": "2024-06-22T23:26:54.543392-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-06-23T06:26:54.547005Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
+uploading data: {'batch': [{'id': '6b7f45eb-d322-4614-9611-10b6325982da', 'type': 'trace-create', 'body': {'id': 'litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb', 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 57, 252490, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'userId': 'langfuse_latency_test_user', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': 'This is a test response', 'role': 'assistant'}, 'tags': []}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 57, 252967, tzinfo=datetime.timezone.utc)}, {'id': '7db8d145-bb3f-4e64-958b-0e73ea777d6d', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 7, 6, 17, 49, 57, 251265), 'metadata': {'litellm_response_cost': 5.4999999999999995e-05, 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': 'This is a test response', 'role': 'assistant'}, 'level': , 'id': 'time-17-49-57-251265_chatcmpl-963c5062-9c99-4bd7-9bd2-7ff039f129e5', 'endTime': datetime.datetime(2024, 7, 6, 17, 49, 57, 251932), 'completionStartTime': datetime.datetime(2024, 7, 6, 17, 49, 57, 251932), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 7, 7, 0, 49, 57, 253374, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.32.0', 'public_key': 'pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003'}}
+making request: {"batch": [{"id": "6b7f45eb-d322-4614-9611-10b6325982da", "type": "trace-create", "body": {"id": "litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb", "timestamp": "2024-07-07T00:49:57.252490Z", "name": "litellm-acompletion", "userId": "langfuse_latency_test_user", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "This is a test response", "role": "assistant"}, "tags": []}, "timestamp": "2024-07-07T00:49:57.252967Z"}, {"id": "7db8d145-bb3f-4e64-958b-0e73ea777d6d", "type": "generation-create", "body": {"traceId": "litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb", "name": "litellm-acompletion", "startTime": "2024-07-06T17:49:57.251265-07:00", "metadata": {"litellm_response_cost": 5.4999999999999995e-05, "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "This is a test response", "role": "assistant"}, "level": "DEFAULT", "id": "time-17-49-57-251265_chatcmpl-963c5062-9c99-4bd7-9bd2-7ff039f129e5", "endTime": "2024-07-06T17:49:57.251932-07:00", "completionStartTime": "2024-07-06T17:49:57.251932-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-07-07T00:49:57.253374Z"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.32.0", "public_key": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003"}} to https://us.cloud.langfuse.com/api/public/ingestion
 ~0 items in the Langfuse queue
-~0 items in the Langfuse queue
-received response: {"errors":[],"successes":[{"id":"696d738d-b46a-418f-be31-049e9add4bd8","status":201},{"id":"caf378b4-ae86-4a74-a7ac-2f9a83ed9d67","status":201}]}
+received response: {"errors":[],"successes":[{"id":"6b7f45eb-d322-4614-9611-10b6325982da","status":201},{"id":"7db8d145-bb3f-4e64-958b-0e73ea777d6d","status":201}]}
 successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
@@ -145,9 +143,11 @@ successfully uploaded batch of 2 items
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
 ~0 items in the Langfuse queue
-Getting trace litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6
 ~0 items in the Langfuse queue
-Getting observations... None, None, None, None, litellm-test-2a7ed10d-b0aa-41c3-874e-adb2e128a9a6, None, GENERATION
+Getting trace litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb
+~0 items in the Langfuse queue
+~0 items in the Langfuse queue
+Getting observations... None, None, None, None, litellm-test-f2b16f5b-81b8-4716-b812-3ff8d4d7b6cb, None, GENERATION
 ~0 items in the Langfuse queue
 joining 1 consumer threads
 ~0 items in the Langfuse queue
@@ -157,7 +157,6 @@ joining 1 consumer threads
 consumer thread 0 joined
 joining 1 consumer threads
 ~0 items in the Langfuse queue
-~0 items in the Langfuse queue
 consumer thread 0 joined
 joining 1 consumer threads
 ~0 items in the Langfuse queue
diff --git a/litellm/tests/test_presidio_masking.py b/litellm/tests/test_presidio_masking.py
index 2b02ea298..382644016 100644
--- a/litellm/tests/test_presidio_masking.py
+++ b/litellm/tests/test_presidio_masking.py
@@ -26,23 +26,31 @@ from litellm.proxy.hooks.presidio_pii_masking import _OPTIONAL_PresidioPIIMaskin
 from litellm.proxy.utils import ProxyLogging
 
 
-def test_validate_environment_missing_http():
+@pytest.mark.parametrize(
+    "base_url",
+    [
+        "presidio-analyzer-s3pa:10000",
+        "https://presidio-analyzer-s3pa:10000",
+        "http://presidio-analyzer-s3pa:10000",
+    ],
+)
+def test_validate_environment_missing_http(base_url):
     pii_masking = _OPTIONAL_PresidioPIIMasking(mock_testing=True)
 
-    os.environ["PRESIDIO_ANALYZER_API_BASE"] = "presidio-analyzer-s3pa:10000/analyze"
-    os.environ["PRESIDIO_ANONYMIZER_API_BASE"] = (
-        "presidio-analyzer-s3pa:10000/anonymize"
-    )
+    os.environ["PRESIDIO_ANALYZER_API_BASE"] = f"{base_url}/analyze"
+    os.environ["PRESIDIO_ANONYMIZER_API_BASE"] = f"{base_url}/anonymize"
     pii_masking.validate_environment()
 
+    expected_url = base_url
+    if not (base_url.startswith("https://") or base_url.startswith("http://")):
+        expected_url = "http://" + base_url
+
     assert (
-        pii_masking.presidio_anonymizer_api_base
-        == "http://presidio-analyzer-s3pa:10000/anonymize/"
-    )
-    assert (
-        pii_masking.presidio_analyzer_api_base
-        == "http://presidio-analyzer-s3pa:10000/analyze/"
+        pii_masking.presidio_anonymizer_api_base == f"{expected_url}/anonymize/"
+    ), "Got={}, Expected={}".format(
+        pii_masking.presidio_anonymizer_api_base, f"{expected_url}/anonymize/"
    )
+    assert pii_masking.presidio_analyzer_api_base == f"{expected_url}/analyze/"
 
 
 @pytest.mark.asyncio
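
For reference, the rule the parametrized cases above pin down can be summarised as a small standalone sketch. The `ensure_http_scheme` name below is hypothetical (the PII-masking hook inlines this logic inside `validate_environment` rather than exposing a helper); the sketch assumes only what the test asserts: bare hosts default to `http://`, explicit `http://`/`https://` prefixes are preserved, and a trailing slash is appended.

    def ensure_http_scheme(base_url: str) -> str:
        """Hypothetical helper: default bare hosts to http:// and keep explicit schemes."""
        if not (base_url.startswith("http://") or base_url.startswith("https://")):
            # no scheme given: treat it as a bare host/service name
            base_url = "http://" + base_url
        if not base_url.endswith("/"):
            # the configured API bases are always used with a trailing slash
            base_url += "/"
        return base_url

    # Mirrors the three parametrized base URLs and the test's expected_url computation.
    for url in (
        "presidio-analyzer-s3pa:10000",
        "https://presidio-analyzer-s3pa:10000",
        "http://presidio-analyzer-s3pa:10000",
    ):
        expected = url if url.startswith(("http://", "https://")) else "http://" + url
        assert ensure_http_scheme(f"{url}/analyze") == f"{expected}/analyze/"

The loop computes its expected value the same way the test's `expected_url` does, so it covers exactly the three parametrized cases and nothing more.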