From 872470ff1f301cc0cc0b1c70785c806ba6af5388 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 7 May 2024 12:58:49 -0700
Subject: [PATCH 1/2] feat(slack_alerting.py): reintegrate langfuse trace url
 for slack alerts

this ensures the langfuse trace url is returned in llm api exception errors
---
 litellm/integrations/langfuse.py        | 37 ++++++++++-------
 litellm/integrations/slack_alerting.py  | 35 ++++++++++------
 litellm/proxy/_super_secret_config.yaml |  5 ++-
 litellm/proxy/proxy_server.py           | 53 +++++++++++++++++--------
 litellm/proxy/utils.py                  | 19 ++++++++-
 5 files changed, 101 insertions(+), 48 deletions(-)

diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index fa8b0c61d..caf5437b2 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -262,7 +262,7 @@ class LangFuseLogger:
 
         try:
             tags = []
-            metadata = copy.deepcopy(metadata) # Avoid modifying the original metadata
+            metadata = copy.deepcopy(metadata)  # Avoid modifying the original metadata
             supports_tags = Version(langfuse.version.__version__) >= Version("2.6.3")
             supports_prompt = Version(langfuse.version.__version__) >= Version("2.7.3")
             supports_costs = Version(langfuse.version.__version__) >= Version("2.7.3")
@@ -276,7 +276,6 @@ class LangFuseLogger:
                 metadata_tags = metadata.pop("tags", [])
                 tags = metadata_tags
-
         # Clean Metadata before logging - never log raw metadata
         # the raw metadata can contain circular references which leads to infinite recursion
         # we clean out all extra litellm metadata params before logging
@@ -303,18 +302,17 @@ class LangFuseLogger:
                 else:
                     clean_metadata[key] = value
-
         session_id = clean_metadata.pop("session_id", None)
         trace_name = clean_metadata.pop("trace_name", None)
         trace_id = clean_metadata.pop("trace_id", None)
         existing_trace_id = clean_metadata.pop("existing_trace_id", None)
         update_trace_keys = clean_metadata.pop("update_trace_keys", [])
-
+
         if trace_name is None and existing_trace_id is None:
             # just log `litellm-{call_type}` as the trace name
             ## DO NOT SET TRACE_NAME if trace-id set. this can lead to overwriting of past traces.
             trace_name = f"litellm-{kwargs.get('call_type', 'completion')}"
-
+
         if existing_trace_id is not None:
             trace_params = {"id": existing_trace_id}
 
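
The hunk that follows implements the `update_trace_keys` contract: when a request targets an existing trace, only the whitelisted `trace_*` keys may overwrite fields on that trace, and any leftover `trace_*` keys are stripped so they never reach the logged metadata. A minimal standalone sketch of that filtering, with hypothetical inputs (`build_trace_params` is illustrative, not litellm's API):

from typing import Optional

def build_trace_params(clean_metadata: dict, existing_trace_id: Optional[str]) -> dict:
    # Only keys whitelisted in `update_trace_keys` may overwrite fields on an
    # existing trace; "trace_user_id" updates "user_id", and so on.
    update_trace_keys = clean_metadata.pop("update_trace_keys", [])
    trace_params = {"id": existing_trace_id}
    for metadata_param_key in update_trace_keys:
        trace_param_key = metadata_param_key.replace("trace_", "")
        if trace_param_key not in trace_params:
            updated_trace_value = clean_metadata.pop(metadata_param_key, None)
            if updated_trace_value is not None:
                trace_params[trace_param_key] = updated_trace_value
    # Remaining trace_* keys are popped so they never reach the metadata log.
    for key in list(filter(lambda k: k.startswith("trace_"), clean_metadata.keys())):
        clean_metadata.pop(key, None)
    return trace_params

# "trace_user_id" is whitelisted and applied; "trace_name" is silently dropped.
assert build_trace_params(
    {"update_trace_keys": ["trace_user_id"], "trace_user_id": "u1", "trace_name": "x"},
    existing_trace_id="t-123",
) == {"id": "t-123", "user_id": "u1"}
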
trace_name = f"litellm-{kwargs.get('call_type', 'completion')}" - + if existing_trace_id is not None: trace_params = {"id": existing_trace_id} @@ -322,15 +320,18 @@ class LangFuseLogger: for metadata_param_key in update_trace_keys: trace_param_key = metadata_param_key.replace("trace_", "") if trace_param_key not in trace_params: - updated_trace_value = clean_metadata.pop(metadata_param_key, None) + updated_trace_value = clean_metadata.pop( + metadata_param_key, None + ) if updated_trace_value is not None: trace_params[trace_param_key] = updated_trace_value - # Pop the trace specific keys that would have been popped if there were a new trace - for key in list(filter(lambda key: key.startswith("trace_"), clean_metadata.keys())): + for key in list( + filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) + ): clean_metadata.pop(key, None) - + # Special keys that are found in the function arguments and not the metadata if "input" in update_trace_keys: trace_params["input"] = input @@ -342,16 +343,22 @@ class LangFuseLogger: "name": trace_name, "session_id": session_id, "input": input, - "version": clean_metadata.pop("trace_version", clean_metadata.get("version", None)), # If provided just version, it will applied to the trace as well, if applied a trace version it will take precedence + "version": clean_metadata.pop( + "trace_version", clean_metadata.get("version", None) + ), # If provided just version, it will applied to the trace as well, if applied a trace version it will take precedence } - for key in list(filter(lambda key: key.startswith("trace_"), clean_metadata.keys())): - trace_params[key.replace("trace_", "")] = clean_metadata.pop(key, None) - + for key in list( + filter(lambda key: key.startswith("trace_"), clean_metadata.keys()) + ): + trace_params[key.replace("trace_", "")] = clean_metadata.pop( + key, None + ) + if level == "ERROR": trace_params["status_message"] = output else: trace_params["output"] = output - + cost = kwargs.get("response_cost", None) print_verbose(f"trace: {cost}") @@ -454,7 +461,7 @@ class LangFuseLogger: ) generation_client = trace.generation(**generation_params) - + return generation_client.trace_id, generation_id except Exception as e: verbose_logger.debug(f"Langfuse Layer Error - {traceback.format_exc()}") diff --git a/litellm/integrations/slack_alerting.py b/litellm/integrations/slack_alerting.py index 5546f7c33..f033d99cd 100644 --- a/litellm/integrations/slack_alerting.py +++ b/litellm/integrations/slack_alerting.py @@ -149,16 +149,21 @@ class SlackAlerting(CustomLogger): def _add_langfuse_trace_id_to_alert( self, - request_info: str, request_data: Optional[dict] = None, - kwargs: Optional[dict] = None, - type: Literal["hanging_request", "slow_response"] = "hanging_request", - start_time: Optional[datetime.datetime] = None, - end_time: Optional[datetime.datetime] = None, - ): + ) -> Optional[str]: + """ + Returns langfuse trace url + """ # do nothing for now - pass - return request_info + if ( + request_data is not None + and request_data.get("metadata", {}).get("trace_id", None) is not None + ): + trace_id = request_data["metadata"]["trace_id"] + if litellm.utils.langFuseLogger is not None: + base_url = litellm.utils.langFuseLogger.Langfuse.base_url + return f"{base_url}/trace/{trace_id}" + return None def _response_taking_too_long_callback_helper( self, @@ -501,14 +506,13 @@ class SlackAlerting(CustomLogger): ) if "langfuse" in litellm.success_callback: - request_info = self._add_langfuse_trace_id_to_alert( - request_info=request_info, + 
@@ -501,14 +506,13 @@ class SlackAlerting(CustomLogger):
                 )
 
                 if "langfuse" in litellm.success_callback:
-                    request_info = self._add_langfuse_trace_id_to_alert(
-                        request_info=request_info,
+                    langfuse_url = self._add_langfuse_trace_id_to_alert(
                         request_data=request_data,
-                        type="hanging_request",
-                        start_time=start_time,
-                        end_time=end_time,
                     )
+                    if langfuse_url is not None:
+                        request_info += "\n🪢 Langfuse Trace: {}".format(langfuse_url)
+
                 # add deployment latencies to alert
                 _deployment_latency_map = self._get_deployment_latencies_to_alert(
                     metadata=request_data.get("metadata", {})
                 )
@@ -701,6 +705,7 @@ Model Info:
             "daily_reports",
             "new_model_added",
         ],
+        **kwargs,
     ):
         """
         Alerting based on thresholds: - https://github.com/BerriAI/litellm/issues/1298
@@ -731,6 +736,10 @@ Model Info:
         formatted_message = (
             f"Level: `{level}`\nTimestamp: `{current_time}`\n\nMessage: {message}"
         )
+
+        if kwargs:
+            for key, value in kwargs.items():
+                formatted_message += f"\n\n{key}: `{value}`\n\n"
 
         if _proxy_base_url is not None:
             formatted_message += f"\n\nProxy URL: `{_proxy_base_url}`"
diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml
index 0475508e3..b7293a17f 100644
--- a/litellm/proxy/_super_secret_config.yaml
+++ b/litellm/proxy/_super_secret_config.yaml
@@ -14,6 +14,9 @@ model_list:
     api_key: my-fake-key-3
     model: openai/my-fake-model-3
   model_name: fake-openai-endpoint
+- model_name: gpt-4
+  litellm_params:
+    model: gpt-3.5-turbo
 router_settings:
   num_retries: 0
   enable_pre_call_checks: true
@@ -25,7 +28,7 @@ router_settings:
   routing_strategy: "latency-based-routing"
 
 litellm_settings:
-  success_callback: ["openmeter"]
+  success_callback: ["langfuse"]
 
 general_settings:
   alerting: ["slack"]
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index c22b381e2..d6d27fc4c 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -3160,7 +3160,9 @@ def data_generator(response):
         yield f"data: {json.dumps(chunk)}\n\n"
 
 
-async def async_data_generator(response, user_api_key_dict):
+async def async_data_generator(
+    response, user_api_key_dict: UserAPIKeyAuth, request_data: dict
+):
     verbose_proxy_logger.debug("inside generator")
     try:
         start_time = time.time()
@@ -3177,7 +3179,9 @@ async def async_data_generator(response, user_api_key_dict):
     except Exception as e:
         traceback.print_exc()
         await proxy_logging_obj.post_call_failure_hook(
-            user_api_key_dict=user_api_key_dict, original_exception=e
+            user_api_key_dict=user_api_key_dict,
+            original_exception=e,
+            request_data=request_data,
         )
         verbose_proxy_logger.debug(
             f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`"
         )
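
The change above threads the raw request body into the streaming generator so that a mid-stream failure can still be reported with full request context. A minimal sketch of that pattern, with a hypothetical `post_call_failure_hook` stand-in (not litellm's implementation):

import asyncio
from typing import AsyncIterator

async def post_call_failure_hook(original_exception: Exception, request_data: dict) -> None:
    # Hypothetical stand-in for proxy_logging_obj.post_call_failure_hook.
    print(f"alerting: {original_exception!r} for request model={request_data.get('model')}")

async def data_stream(response: AsyncIterator[str], request_data: dict) -> AsyncIterator[str]:
    # Like async_data_generator above: the request body is captured up front
    # so the failure hook can run even when the error happens mid-stream.
    try:
        async for chunk in response:
            yield f"data: {chunk}\n\n"
    except Exception as e:
        await post_call_failure_hook(original_exception=e, request_data=request_data)
        raise

async def fake_response() -> AsyncIterator[str]:
    yield "hello"
    raise RuntimeError("upstream dropped the connection")

async def main() -> None:
    stream = data_stream(fake_response(), request_data={"model": "gpt-3.5-turbo"})
    try:
        async for line in stream:
            print(line, end="")
    except RuntimeError:
        pass  # the hook has already fired; the error would normally be re-raised to the client

asyncio.run(main())
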
`litellm --model gpt-3.5-turbo --debug`" @@ -3202,8 +3206,14 @@ async def async_data_generator(response, user_api_key_dict): yield f"data: {error_returned}\n\n" -def select_data_generator(response, user_api_key_dict): - return async_data_generator(response=response, user_api_key_dict=user_api_key_dict) +def select_data_generator( + response, user_api_key_dict: UserAPIKeyAuth, request_data: dict +): + return async_data_generator( + response=response, + user_api_key_dict=user_api_key_dict, + request_data=request_data, + ) def get_litellm_model_info(model: dict = {}): @@ -3496,9 +3506,8 @@ async def chat_completion( user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), ): global general_settings, user_debug, proxy_logging_obj, llm_model_list + data = {} try: - # async with llm_router.sem - data = {} body = await request.body() body_str = body.decode() try: @@ -3689,7 +3698,9 @@ async def chat_completion( "x-litellm-model-api-base": api_base, } selected_data_generator = select_data_generator( - response=response, user_api_key_dict=user_api_key_dict + response=response, + user_api_key_dict=user_api_key_dict, + request_data=data, ) return StreamingResponse( selected_data_generator, @@ -3711,7 +3722,7 @@ async def chat_completion( data["litellm_status"] = "fail" # used for alerting traceback.print_exc() await proxy_logging_obj.post_call_failure_hook( - user_api_key_dict=user_api_key_dict, original_exception=e + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data ) verbose_proxy_logger.debug( f"\033[1;31mAn error occurred: {e}\n\n Debug this by setting `--debug`, e.g. `litellm --model gpt-3.5-turbo --debug`" @@ -3873,7 +3884,9 @@ async def completion( "x-litellm-model-id": model_id, } selected_data_generator = select_data_generator( - response=response, user_api_key_dict=user_api_key_dict + response=response, + user_api_key_dict=user_api_key_dict, + request_data=data, ) return StreamingResponse( @@ -3926,6 +3939,7 @@ async def embeddings( user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), ): global proxy_logging_obj + data: Any = {} try: # Use orjson to parse JSON data, orjson speeds up requests significantly body = await request.body() @@ -4071,7 +4085,7 @@ async def embeddings( except Exception as e: data["litellm_status"] = "fail" # used for alerting await proxy_logging_obj.post_call_failure_hook( - user_api_key_dict=user_api_key_dict, original_exception=e + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data ) traceback.print_exc() if isinstance(e, HTTPException): @@ -4108,6 +4122,7 @@ async def image_generation( user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), ): global proxy_logging_obj + data = {} try: # Use orjson to parse JSON data, orjson speeds up requests significantly body = await request.body() @@ -4227,7 +4242,7 @@ async def image_generation( except Exception as e: data["litellm_status"] = "fail" # used for alerting await proxy_logging_obj.post_call_failure_hook( - user_api_key_dict=user_api_key_dict, original_exception=e + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data ) traceback.print_exc() if isinstance(e, HTTPException): @@ -4268,10 +4283,11 @@ async def audio_transcriptions( https://platform.openai.com/docs/api-reference/audio/createTranscription?lang=curl """ global proxy_logging_obj + data: Dict = {} try: # Use orjson to parse JSON data, orjson speeds up requests significantly form_data = await request.form() - data: Dict = {key: value for key, value in 
form_data.items() if key != "file"} + data = {key: value for key, value in form_data.items() if key != "file"} # Include original request and headers in the data data["proxy_server_request"] = { # type: ignore @@ -4406,7 +4422,7 @@ async def audio_transcriptions( except Exception as e: data["litellm_status"] = "fail" # used for alerting await proxy_logging_obj.post_call_failure_hook( - user_api_key_dict=user_api_key_dict, original_exception=e + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data ) traceback.print_exc() if isinstance(e, HTTPException): @@ -4455,6 +4471,7 @@ async def moderations( ``` """ global proxy_logging_obj + data: Dict = {} try: # Use orjson to parse JSON data, orjson speeds up requests significantly body = await request.body() @@ -4568,7 +4585,7 @@ async def moderations( except Exception as e: data["litellm_status"] = "fail" # used for alerting await proxy_logging_obj.post_call_failure_hook( - user_api_key_dict=user_api_key_dict, original_exception=e + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data ) traceback.print_exc() if isinstance(e, HTTPException): @@ -7999,8 +8016,8 @@ async def async_queue_request( Now using a FastAPI background task + /chat/completions compatible endpoint """ + data = {} try: - data = {} data = await request.json() # type: ignore # Include original request and headers in the data @@ -8065,7 +8082,9 @@ async def async_queue_request( ): # use generate_responses to stream responses return StreamingResponse( async_data_generator( - user_api_key_dict=user_api_key_dict, response=response + user_api_key_dict=user_api_key_dict, + response=response, + request_data=data, ), media_type="text/event-stream", ) @@ -8073,7 +8092,7 @@ async def async_queue_request( return response except Exception as e: await proxy_logging_obj.post_call_failure_hook( - user_api_key_dict=user_api_key_dict, original_exception=e + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data ) if isinstance(e, HTTPException): raise ProxyException( diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 9734806df..0379d5152 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -302,6 +302,7 @@ class ProxyLogging: "budget_alerts", "db_exceptions", ], + request_data: Optional[dict] = None, ): """ Alerting based on thresholds: - https://github.com/BerriAI/litellm/issues/1298 @@ -331,10 +332,19 @@ class ProxyLogging: if _proxy_base_url is not None: formatted_message += f"\n\nProxy URL: `{_proxy_base_url}`" + extra_kwargs = {} + if request_data is not None: + _url = self.slack_alerting_instance._add_langfuse_trace_id_to_alert( + request_data=request_data + ) + if _url is not None: + extra_kwargs["🪢 Langfuse Trace"] = _url + formatted_message += "\n\n🪢 Langfuse Trace: {}".format(_url) + for client in self.alerting: if client == "slack": await self.slack_alerting_instance.send_alert( - message=message, level=level, alert_type=alert_type + message=message, level=level, alert_type=alert_type, **extra_kwargs ) elif client == "sentry": if litellm.utils.sentry_sdk_instance is not None: @@ -369,6 +379,7 @@ class ProxyLogging: message=f"DB read/write call failed: {error_message}", level="High", alert_type="db_exceptions", + request_data={}, ) ) @@ -384,7 +395,10 @@ class ProxyLogging: litellm.utils.capture_exception(error=original_exception) async def post_call_failure_hook( - self, original_exception: Exception, user_api_key_dict: UserAPIKeyAuth + self, + original_exception: Exception, + 
user_api_key_dict: UserAPIKeyAuth, + request_data: dict, ): """ Allows users to raise custom exceptions/log when a call fails, without having to deal with parsing Request body. @@ -409,6 +423,7 @@ class ProxyLogging: message=f"LLM API call failed: {str(original_exception)}", level="High", alert_type="llm_exceptions", + request_data=request_data, ) ) From e85468badbb7873097a300fa49064b06146c72b2 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 7 May 2024 13:12:06 -0700 Subject: [PATCH 2/2] test: fix linting error --- litellm/tests/langfuse.log | 137 +++++++++++++++----------------- litellm/tests/test_alangfuse.py | 116 +++++++++++++++++++-------- 2 files changed, 146 insertions(+), 107 deletions(-) diff --git a/litellm/tests/langfuse.log b/litellm/tests/langfuse.log index f47590a29..61bc6ada5 100644 --- a/litellm/tests/langfuse.log +++ b/litellm/tests/langfuse.log @@ -1,79 +1,68 @@ -int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -Traceback (most recent call last): - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/client.py", line 778, in generation - "usage": _convert_usage_input(usage) if usage is not None else None, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 77, in _convert_usage_input - "totalCost": extract_by_priority(usage, ["totalCost", "total_cost"]), - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 32, in extract_by_priority - return int(usage[key]) - ^^^^^^^^^^^^^^^ -TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -Traceback (most recent call last): - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/client.py", line 778, in generation - "usage": _convert_usage_input(usage) if usage is not None else None, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 77, in _convert_usage_input - "totalCost": extract_by_priority(usage, ["totalCost", "total_cost"]), - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 32, in extract_by_priority - return int(usage[key]) - ^^^^^^^^^^^^^^^ -TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -Traceback (most recent call last): - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/client.py", line 778, in generation - "usage": _convert_usage_input(usage) if usage is not None else None, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 77, in _convert_usage_input - "totalCost": extract_by_priority(usage, ["totalCost", "total_cost"]), - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 32, in extract_by_priority - return int(usage[key]) - ^^^^^^^^^^^^^^^ -TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -Traceback (most recent call last): - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/client.py", line 778, in generation - "usage": _convert_usage_input(usage) 
if usage is not None else None, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 77, in _convert_usage_input - "totalCost": extract_by_priority(usage, ["totalCost", "total_cost"]), - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 32, in extract_by_priority - return int(usage[key]) - ^^^^^^^^^^^^^^^ -TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -int() argument must be a string, a bytes-like object or a real number, not 'NoneType' -Traceback (most recent call last): - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/client.py", line 778, in generation - "usage": _convert_usage_input(usage) if usage is not None else None, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 77, in _convert_usage_input - "totalCost": extract_by_priority(usage, ["totalCost", "total_cost"]), - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/lib/python3.11/site-packages/langfuse/utils.py", line 32, in extract_by_priority - return int(usage[key]) - ^^^^^^^^^^^^^^^ -TypeError: int() argument must be a string, a bytes-like object or a real number, not 'NoneType' consumer is running... -Getting observations... None, None, None, None, litellm-test-98e1cc75-bef8-4280-a2b9-e08633b81acd, None, GENERATION +Creating trace id='litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' timestamp=datetime.datetime(2024, 5, 7, 20, 11, 22, 420643, tzinfo=datetime.timezone.utc) name='litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' user_id='litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' input={'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]} output={'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'} session_id='litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' release='litellm-test-release' version='litellm-test-version' metadata={'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'} tags=['litellm-test-tag1', 'litellm-test-tag2', 'cache_hit:False'] public=None +adding task {'id': '9d380abe-bb42-480b-b48f-952ed6776e1c', 'type': 'trace-create', 'body': {'id': 'litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 420643, tzinfo=datetime.timezone.utc), 'name': 'litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'userId': 'litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'sessionId': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 
'release': 'litellm-test-release', 'version': 'litellm-test-version', 'metadata': {'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}, 'tags': ['litellm-test-tag1', 'litellm-test-tag2', 'cache_hit:False']}} +Creating generation trace_id='litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' name='litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' start_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 419075) metadata={'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]} output={'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'} level= status_message=None parent_observation_id=None version='litellm-test-version' id='litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' end_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 419879) completion_start_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 419879) model='gpt-3.5-turbo' model_parameters={'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None... +item size 1224 +adding task {'id': '0d3ae4f8-e352-4acd-98ec-d21be7e8f5eb', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'name': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 419075), 'metadata': {'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'level': , 'version': 'litellm-test-version', 'id': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 419879), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 419879), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}} +item size 1359 +Creating trace id='litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' timestamp=datetime.datetime(2024, 5, 7, 20, 11, 22, 423093, tzinfo=datetime.timezone.utc) name='litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' user_id='litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' input={'messages': [{'role': 'user', 'content': 
'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]} output={'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'} session_id='litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' release='litellm-test-release' version='litellm-test-version' metadata={'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'} tags=['litellm-test-tag1', 'litellm-test-tag2', 'cache_hit:False'] public=None +adding task {'id': '1b34abb5-4a24-4042-a8c3-9f3ea0254f2b', 'type': 'trace-create', 'body': {'id': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 423093, tzinfo=datetime.timezone.utc), 'name': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'userId': 'litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'sessionId': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'release': 'litellm-test-release', 'version': 'litellm-test-version', 'metadata': {'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}, 'tags': ['litellm-test-tag1', 'litellm-test-tag2', 'cache_hit:False']}} +Creating generation trace_id='litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' name='litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' start_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 421978) metadata={'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]} output={'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'} level= status_message=None parent_observation_id=None version='litellm-test-version' id='litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' end_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 422551) completion_start_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 422551) model='gpt-3.5-turbo' model_parameters={'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None... 
+item size 1224 +adding task {'id': '050ba9cd-3eff-443b-9637-705406ceb8cb', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'name': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 421978), 'metadata': {'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'level': , 'version': 'litellm-test-version', 'id': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 422551), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 422551), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}} +item size 1359 +flushing queue +successfully flushed about 0 items. +Creating trace id='litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' timestamp=datetime.datetime(2024, 5, 7, 20, 11, 22, 425221, tzinfo=datetime.timezone.utc) name=None user_id=None input=None output={'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'} session_id=None release=None version=None metadata={'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'} tags=None public=None +adding task {'id': 'd5173131-5bef-46cd-aa5a-6dcd01f6c000', 'type': 'trace-create', 'body': {'id': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 425221, tzinfo=datetime.timezone.utc), 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'metadata': {'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}}} +Creating generation trace_id='litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' name='litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' start_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 424075) metadata={'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False} input={'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]} output={'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'} level= status_message=None 
parent_observation_id=None version='litellm-test-version' id='litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5' end_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 424526) completion_start_time=datetime.datetime(2024, 5, 7, 13, 11, 22, 424526) model='gpt-3.5-turbo' model_parameters={'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None... +item size 630 +adding task {'id': 'ed61fc8d-aede-4c33-9ce8-377d498169f4', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'name': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 424075), 'metadata': {'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'level': , 'version': 'litellm-test-version', 'id': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 424526), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 424526), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}} +uploading batch of 15 items +uploading data: {'batch': [{'id': 'e3840349-5e27-4921-84fc-f11ac428b7c5', 'type': 'trace-create', 'body': {'id': '77e94058-6f8a-43bc-97ef-1a8d4966592c', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 289521, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 289935, tzinfo=datetime.timezone.utc)}, {'id': '54036ec0-06ff-44d1-ac3a-f6085a3983ab', 'type': 'generation-create', 'body': {'traceId': '77e94058-6f8a-43bc-97ef-1a8d4966592c', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 21, 970003), 'metadata': {'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-13-11-21-970003_chatcmpl-30ccf23d-ac57-4183-ab2f-b93f084c4187', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 286720), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 286720), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 290909, tzinfo=datetime.timezone.utc)}, {'id': '4bf88864-4937-48a4-8e9b-b1cf6a29cc5c', 'type': 'trace-create', 'body': {'id': 
'04190fd5-8a1f-41d9-b8be-878945c35293', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 292743, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 292929, tzinfo=datetime.timezone.utc)}, {'id': '050a1ed2-b54e-46ab-9145-04baca33524e', 'type': 'generation-create', 'body': {'traceId': '04190fd5-8a1f-41d9-b8be-878945c35293', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 282826), 'metadata': {'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-13-11-22-282826_chatcmpl-684e7a99-b0ad-43e3-a0e9-acbce76e5457', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 288054), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 288054), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 293730, tzinfo=datetime.timezone.utc)}, {'id': '94b80fdf-7df9-4b69-8500-df55a4748802', 'type': 'trace-create', 'body': {'id': '82588025-780b-4045-a131-06dcaf2c54ca', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 293803, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 293964, tzinfo=datetime.timezone.utc)}, {'id': '659db88e-6adc-4c52-82d8-dac517773242', 'type': 'generation-create', 'body': {'traceId': '82588025-780b-4045-a131-06dcaf2c54ca', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 280988), 'metadata': {'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-13-11-22-280988_chatcmpl-4ecaabdd-be67-4122-a3bf-b95466ffee0a', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 287168), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 287168), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 294336, tzinfo=datetime.timezone.utc)}, {'id': '383b9518-93ff-4943-ae0c-b3256ee3c1a7', 'type': 'trace-create', 'body': {'id': 'fe18bb31-ded9-4ad2-8417-41e0e3de195c', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 295711, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 295870, tzinfo=datetime.timezone.utc)}, {'id': '127d6d13-e8b0-44f2-bba1-cc5d9710b0b4', 'type': 'generation-create', 'body': {'traceId': 'fe18bb31-ded9-4ad2-8417-41e0e3de195c', 'name': 
'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 284370), 'metadata': {'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-13-11-22-284370_chatcmpl-bf93ab8e-ecf2-4f04-9506-ef51a1c4c9d0', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 288779), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 288779), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 296237, tzinfo=datetime.timezone.utc)}, {'id': '2bc81d4d-f6a5-415b-acaa-feb883c41bbb', 'type': 'trace-create', 'body': {'id': '99b7014a-c3c0-4040-92ad-64a665ab6abe', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 297355, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 297502, tzinfo=datetime.timezone.utc)}, {'id': 'd51705a9-088a-4f49-b494-f4fa8e6bc59e', 'type': 'generation-create', 'body': {'traceId': '99b7014a-c3c0-4040-92ad-64a665ab6abe', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 285989), 'metadata': {'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'This is a test'}]}, 'output': {'content': "It's simple to use and easy to get started", 'role': 'assistant'}, 'level': , 'id': 'time-13-11-22-285989_chatcmpl-1a3c46e4-d474-4d19-92d8-8a7ee7ac0799', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 295600), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 295600), 'model': 'chatgpt-v-2', 'modelParameters': {'temperature': '0.7', 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': }}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 297845, tzinfo=datetime.timezone.utc)}, {'id': '9d380abe-bb42-480b-b48f-952ed6776e1c', 'type': 'trace-create', 'body': {'id': 'litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 420643, tzinfo=datetime.timezone.utc), 'name': 'litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'userId': 'litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'sessionId': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'release': 'litellm-test-release', 'version': 'litellm-test-version', 'metadata': {'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}, 'tags': ['litellm-test-tag1', 'litellm-test-tag2', 'cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 
421233, tzinfo=datetime.timezone.utc)}, {'id': '0d3ae4f8-e352-4acd-98ec-d21be7e8f5eb', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'name': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 419075), 'metadata': {'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'level': , 'version': 'litellm-test-version', 'id': 'litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 419879), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 419879), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 421804, tzinfo=datetime.timezone.utc)}, {'id': '1b34abb5-4a24-4042-a8c3-9f3ea0254f2b', 'type': 'trace-create', 'body': {'id': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 423093, tzinfo=datetime.timezone.utc), 'name': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'userId': 'litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'sessionId': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'release': 'litellm-test-release', 'version': 'litellm-test-version', 'metadata': {'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}, 'tags': ['litellm-test-tag1', 'litellm-test-tag2', 'cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 423311, tzinfo=datetime.timezone.utc)}, {'id': '050ba9cd-3eff-443b-9637-705406ceb8cb', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'name': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 421978), 'metadata': {'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 
'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'level': , 'version': 'litellm-test-version', 'id': 'litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 422551), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 422551), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': , 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 423829, tzinfo=datetime.timezone.utc)}, {'id': 'd5173131-5bef-46cd-aa5a-6dcd01f6c000', 'type': 'trace-create', 'body': {'id': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 425221, tzinfo=datetime.timezone.utc), 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'metadata': {'trace_actual_metadata_key': 'trace_actual_metadata_value', 'generation_id': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 425370, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 15, 'sdk_integration': 'litellm', 'sdk_name': 'python', 'sdk_version': '2.27.0', 'public_key': 'pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66'}} +making request: {"batch": [{"id": "e3840349-5e27-4921-84fc-f11ac428b7c5", "type": "trace-create", "body": {"id": "77e94058-6f8a-43bc-97ef-1a8d4966592c", "timestamp": "2024-05-07T20:11:22.289521Z", "name": "litellm-acompletion", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.289935Z"}, {"id": "54036ec0-06ff-44d1-ac3a-f6085a3983ab", "type": "generation-create", "body": {"traceId": "77e94058-6f8a-43bc-97ef-1a8d4966592c", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:21.970003-07:00", "metadata": {"cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-21-970003_chatcmpl-30ccf23d-ac57-4183-ab2f-b93f084c4187", "endTime": "2024-05-07T13:11:22.286720-07:00", "completionStartTime": "2024-05-07T13:11:22.286720-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-05-07T20:11:22.290909Z"}, {"id": "4bf88864-4937-48a4-8e9b-b1cf6a29cc5c", "type": "trace-create", "body": {"id": "04190fd5-8a1f-41d9-b8be-878945c35293", "timestamp": "2024-05-07T20:11:22.292743Z", "name": "litellm-acompletion", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.292929Z"}, {"id": "050a1ed2-b54e-46ab-9145-04baca33524e", "type": "generation-create", "body": 
{"traceId": "04190fd5-8a1f-41d9-b8be-878945c35293", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:22.282826-07:00", "metadata": {"cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-22-282826_chatcmpl-684e7a99-b0ad-43e3-a0e9-acbce76e5457", "endTime": "2024-05-07T13:11:22.288054-07:00", "completionStartTime": "2024-05-07T13:11:22.288054-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-05-07T20:11:22.293730Z"}, {"id": "94b80fdf-7df9-4b69-8500-df55a4748802", "type": "trace-create", "body": {"id": "82588025-780b-4045-a131-06dcaf2c54ca", "timestamp": "2024-05-07T20:11:22.293803Z", "name": "litellm-acompletion", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.293964Z"}, {"id": "659db88e-6adc-4c52-82d8-dac517773242", "type": "generation-create", "body": {"traceId": "82588025-780b-4045-a131-06dcaf2c54ca", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:22.280988-07:00", "metadata": {"cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-22-280988_chatcmpl-4ecaabdd-be67-4122-a3bf-b95466ffee0a", "endTime": "2024-05-07T13:11:22.287168-07:00", "completionStartTime": "2024-05-07T13:11:22.287168-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-05-07T20:11:22.294336Z"}, {"id": "383b9518-93ff-4943-ae0c-b3256ee3c1a7", "type": "trace-create", "body": {"id": "fe18bb31-ded9-4ad2-8417-41e0e3de195c", "timestamp": "2024-05-07T20:11:22.295711Z", "name": "litellm-acompletion", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.295870Z"}, {"id": "127d6d13-e8b0-44f2-bba1-cc5d9710b0b4", "type": "generation-create", "body": {"traceId": "fe18bb31-ded9-4ad2-8417-41e0e3de195c", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:22.284370-07:00", "metadata": {"cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-22-284370_chatcmpl-bf93ab8e-ecf2-4f04-9506-ef51a1c4c9d0", "endTime": "2024-05-07T13:11:22.288779-07:00", "completionStartTime": "2024-05-07T13:11:22.288779-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-05-07T20:11:22.296237Z"}, {"id": "2bc81d4d-f6a5-415b-acaa-feb883c41bbb", "type": "trace-create", "body": {"id": "99b7014a-c3c0-4040-92ad-64a665ab6abe", "timestamp": "2024-05-07T20:11:22.297355Z", "name": 
"litellm-acompletion", "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.297502Z"}, {"id": "d51705a9-088a-4f49-b494-f4fa8e6bc59e", "type": "generation-create", "body": {"traceId": "99b7014a-c3c0-4040-92ad-64a665ab6abe", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:22.285989-07:00", "metadata": {"cache_hit": false}, "input": {"messages": [{"role": "user", "content": "This is a test"}]}, "output": {"content": "It's simple to use and easy to get started", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-22-285989_chatcmpl-1a3c46e4-d474-4d19-92d8-8a7ee7ac0799", "endTime": "2024-05-07T13:11:22.295600-07:00", "completionStartTime": "2024-05-07T13:11:22.295600-07:00", "model": "chatgpt-v-2", "modelParameters": {"temperature": "0.7", "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS"}}, "timestamp": "2024-05-07T20:11:22.297845Z"}, {"id": "9d380abe-bb42-480b-b48f-952ed6776e1c", "type": "trace-create", "body": {"id": "litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "timestamp": "2024-05-07T20:11:22.420643Z", "name": "litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "userId": "litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "input": {"messages": [{"role": "user", "content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}]}, "output": {"content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "role": "assistant"}, "sessionId": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "release": "litellm-test-release", "version": "litellm-test-version", "metadata": {"trace_actual_metadata_key": "trace_actual_metadata_value", "generation_id": "litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}, "tags": ["litellm-test-tag1", "litellm-test-tag2", "cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.421233Z"}, {"id": "0d3ae4f8-e352-4acd-98ec-d21be7e8f5eb", "type": "generation-create", "body": {"traceId": "litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "name": "litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "startTime": "2024-05-07T13:11:22.419075-07:00", "metadata": {"gen_metadata_key": "gen_metadata_value", "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}]}, "output": {"content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "role": "assistant"}, "level": "DEFAULT", "version": "litellm-test-version", "id": "litellm-test-trace1-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "endTime": "2024-05-07T13:11:22.419879-07:00", "completionStartTime": "2024-05-07T13:11:22.419879-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.2", "max_tokens": 100, "user": "langfuse_latency_test_user", 
"extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-05-07T20:11:22.421804Z"}, {"id": "1b34abb5-4a24-4042-a8c3-9f3ea0254f2b", "type": "trace-create", "body": {"id": "litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "timestamp": "2024-05-07T20:11:22.423093Z", "name": "litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "userId": "litellm-test-user1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "input": {"messages": [{"role": "user", "content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}]}, "output": {"content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "role": "assistant"}, "sessionId": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "release": "litellm-test-release", "version": "litellm-test-version", "metadata": {"trace_actual_metadata_key": "trace_actual_metadata_value", "generation_id": "litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}, "tags": ["litellm-test-tag1", "litellm-test-tag2", "cache_hit:False"]}, "timestamp": "2024-05-07T20:11:22.423311Z"}, {"id": "050ba9cd-3eff-443b-9637-705406ceb8cb", "type": "generation-create", "body": {"traceId": "litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "name": "litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "startTime": "2024-05-07T13:11:22.421978-07:00", "metadata": {"gen_metadata_key": "gen_metadata_value", "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}]}, "output": {"content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "role": "assistant"}, "level": "DEFAULT", "version": "litellm-test-version", "id": "litellm-test-trace2-generation-1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "endTime": "2024-05-07T13:11:22.422551-07:00", "completionStartTime": "2024-05-07T13:11:22.422551-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.2", "max_tokens": 100, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-05-07T20:11:22.423829Z"}, {"id": "d5173131-5bef-46cd-aa5a-6dcd01f6c000", "type": "trace-create", "body": {"id": "litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "timestamp": "2024-05-07T20:11:22.425221Z", "output": {"content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "role": "assistant"}, "metadata": {"trace_actual_metadata_key": "trace_actual_metadata_value", "generation_id": "litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}}, "timestamp": "2024-05-07T20:11:22.425370Z"}], "metadata": {"batch_size": 15, "sdk_integration": "litellm", "sdk_name": "python", "sdk_version": "2.27.0", "public_key": "pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66"}} to 
https://cloud.langfuse.com/api/public/ingestion
+received response: {"errors":[],"successes":[{"id":"e3840349-5e27-4921-84fc-f11ac428b7c5","status":201},{"id":"54036ec0-06ff-44d1-ac3a-f6085a3983ab","status":201},{"id":"4bf88864-4937-48a4-8e9b-b1cf6a29cc5c","status":201},{"id":"050a1ed2-b54e-46ab-9145-04baca33524e","status":201},{"id":"94b80fdf-7df9-4b69-8500-df55a4748802","status":201},{"id":"659db88e-6adc-4c52-82d8-dac517773242","status":201},{"id":"383b9518-93ff-4943-ae0c-b3256ee3c1a7","status":201},{"id":"127d6d13-e8b0-44f2-bba1-cc5d9710b0b4","status":201},{"id":"2bc81d4d-f6a5-415b-acaa-feb883c41bbb","status":201},{"id":"d51705a9-088a-4f49-b494-f4fa8e6bc59e","status":201},{"id":"9d380abe-bb42-480b-b48f-952ed6776e1c","status":201},{"id":"0d3ae4f8-e352-4acd-98ec-d21be7e8f5eb","status":201},{"id":"1b34abb5-4a24-4042-a8c3-9f3ea0254f2b","status":201},{"id":"050ba9cd-3eff-443b-9637-705406ceb8cb","status":201},{"id":"d5173131-5bef-46cd-aa5a-6dcd01f6c000","status":201}]}
+successfully uploaded batch of 15 items
+item size 1359
+Getting trace litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5
+Getting observations... None, None, None, None, litellm-test-trace1-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5, None, GENERATION
+uploading batch of 1 items
+uploading data: {'batch': [{'id': 'ed61fc8d-aede-4c33-9ce8-377d498169f4', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'name': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 424075), 'metadata': {'gen_metadata_key': 'gen_metadata_value', 'cache_hit': False}, 'input': {'messages': [{'role': 'user', 'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5'}]}, 'output': {'content': 'litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'role': 'assistant'}, 'level': <ObservationLevel.DEFAULT: 'DEFAULT'>, 'version': 'litellm-test-version', 'id': 'litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 424526), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 22, 424526), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.2', 'max_tokens': 100, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>, 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 22, 425776, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 1, 'sdk_integration': 'litellm', 'sdk_name': 'python', 'sdk_version': '2.27.0', 'public_key': 'pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66'}}
+making request: {"batch": [{"id": "ed61fc8d-aede-4c33-9ce8-377d498169f4", "type": "generation-create", "body": {"traceId": "litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "name": "litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "startTime": "2024-05-07T13:11:22.424075-07:00", "metadata": {"gen_metadata_key": "gen_metadata_value", "cache_hit": false}, "input": {"messages": [{"role": "user", "content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5"}]}, "output": {"content": "litellm-test-session-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5:litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "role": "assistant"}, "level": "DEFAULT", "version": "litellm-test-version", "id": "litellm-test-trace2-generation-2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5", "endTime": "2024-05-07T13:11:22.424526-07:00", "completionStartTime": "2024-05-07T13:11:22.424526-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.2", "max_tokens": 100, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-05-07T20:11:22.425776Z"}], "metadata": {"batch_size": 1, "sdk_integration": "litellm", "sdk_name": "python", "sdk_version": "2.27.0", "public_key": "pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66"}} to https://cloud.langfuse.com/api/public/ingestion
+Getting trace litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5
+received response: {"errors":[],"successes":[{"id":"ed61fc8d-aede-4c33-9ce8-377d498169f4","status":201}]}
+successfully uploaded batch of 1 items
+Getting observations... None, None, None, None, litellm-test-trace2-c8f258e1-002a-4ab9-98e1-1bf3c84c0bd5, None, GENERATION
 consumer is running...
-Getting observations... None, None, None, None, litellm-test-532d2bc8-f8d6-42fd-8f78-416bae79925d, None, GENERATION
-joining 1 consumer threads
-consumer thread 0 joined
-joining 1 consumer threads
-consumer thread 0 joined
-joining 1 consumer threads
-consumer thread 0 joined
-joining 1 consumer threads
-consumer thread 0 joined
-joining 1 consumer threads
-consumer thread 0 joined
-joining 1 consumer threads
-consumer thread 0 joined
+flushing queue
+successfully flushed about 0 items.
+Creating trace id='litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1' timestamp=datetime.datetime(2024, 5, 7, 20, 11, 45, 796169, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id=None input={'messages': 'redacted-by-litellm'} output={'content': 'redacted-by-litellm', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=['cache_hit:False'] public=None
+adding task {'id': '244ffc62-a30d-4281-8a86-bdfcb3edef05', 'type': 'trace-create', 'body': {'id': 'litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 45, 796169, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': ['cache_hit:False']}}
+Creating generation trace_id='litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1' name='litellm-acompletion' start_time=datetime.datetime(2024, 5, 7, 13, 11, 45, 794599) metadata={'cache_hit': False} input={'messages': 'redacted-by-litellm'} output={'content': 'redacted-by-litellm', 'role': 'assistant'} level=<ObservationLevel.DEFAULT: 'DEFAULT'> status_message=None parent_observation_id=None version=None id='time-13-11-45-794599_chatcmpl-28d76a11-56a6-43d2-9bf6-a6ddf7d8895a' end_time=datetime.datetime(2024, 5, 7, 13, 11, 45, 795329) completion_start_time=datetime.datetime(2024, 5, 7, 13, 11, 45, 795329) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=10, output=20, total=None, unit=<ModelUsageUnit.TOKENS: 'TOKENS'>, input_cost=None, output_cost=None, total_cost=5.4999999999999995e-05) prompt_name=None prompt_version=None...
+item size 400
+adding task {'id': 'e9d12a6d-3fca-4adb-a018-bf276733ffa6', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 45, 794599), 'metadata': {'cache_hit': False}, 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': <ObservationLevel.DEFAULT: 'DEFAULT'>, 'id': 'time-13-11-45-794599_chatcmpl-28d76a11-56a6-43d2-9bf6-a6ddf7d8895a', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 45, 795329), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 45, 795329), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>, 'totalCost': 5.4999999999999995e-05}}}
+item size 876
+uploading batch of 2 items
+uploading data: {'batch': [{'id': '244ffc62-a30d-4281-8a86-bdfcb3edef05', 'type': 'trace-create', 'body': {'id': 'litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 45, 796169, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 45, 796433, tzinfo=datetime.timezone.utc)}, {'id': 'e9d12a6d-3fca-4adb-a018-bf276733ffa6', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 45, 794599), 'metadata': {'cache_hit': False}, 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': <ObservationLevel.DEFAULT: 'DEFAULT'>, 'id': 'time-13-11-45-794599_chatcmpl-28d76a11-56a6-43d2-9bf6-a6ddf7d8895a', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 45, 795329), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 45, 795329), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': False, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 10, 'output': 20, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>, 'totalCost': 5.4999999999999995e-05}}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 45, 797038, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'litellm', 'sdk_name': 'python', 'sdk_version': '2.27.0', 'public_key': 'pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66'}}
+making request: {"batch": [{"id": "244ffc62-a30d-4281-8a86-bdfcb3edef05", "type": "trace-create", "body": {"id": "litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1", "timestamp": "2024-05-07T20:11:45.796169Z", "name": "litellm-acompletion", "input": {"messages": "redacted-by-litellm"}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:45.796433Z"}, {"id": "e9d12a6d-3fca-4adb-a018-bf276733ffa6", "type": "generation-create", "body": {"traceId": "litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:45.794599-07:00", "metadata": {"cache_hit": false}, "input": {"messages": "redacted-by-litellm"}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-45-794599_chatcmpl-28d76a11-56a6-43d2-9bf6-a6ddf7d8895a", "endTime": "2024-05-07T13:11:45.795329-07:00", "completionStartTime": "2024-05-07T13:11:45.795329-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "stream": false, "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 10, "output": 20, "unit": "TOKENS", "totalCost": 5.4999999999999995e-05}}, "timestamp": "2024-05-07T20:11:45.797038Z"}], "metadata": {"batch_size": 2, "sdk_integration": "litellm", "sdk_name": "python", "sdk_version": "2.27.0", "public_key": "pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66"}} to https://cloud.langfuse.com/api/public/ingestion
+received response: {"errors":[],"successes":[{"id":"244ffc62-a30d-4281-8a86-bdfcb3edef05","status":201},{"id":"e9d12a6d-3fca-4adb-a018-bf276733ffa6","status":201}]}
+successfully uploaded batch of 2 items
+Getting observations... None, None, None, None, litellm-test-a87c7c71-32cd-4e6c-acc0-7378d6d81bb1, None, GENERATION
+consumer is running...
+flushing queue
+successfully flushed about 0 items.
+Creating trace id='litellm-test-d9136466-2e87-4afc-8367-dc51764251c7' timestamp=datetime.datetime(2024, 5, 7, 20, 11, 48, 286447, tzinfo=datetime.timezone.utc) name='litellm-acompletion' user_id=None input={'messages': 'redacted-by-litellm'} output={'content': 'redacted-by-litellm', 'role': 'assistant'} session_id=None release=None version=None metadata=None tags=['cache_hit:False'] public=None
+adding task {'id': 'cab47524-1e1e-4404-b8bd-5f526895ac0c', 'type': 'trace-create', 'body': {'id': 'litellm-test-d9136466-2e87-4afc-8367-dc51764251c7', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 48, 286447, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': ['cache_hit:False']}}
+Creating generation trace_id='litellm-test-d9136466-2e87-4afc-8367-dc51764251c7' name='litellm-acompletion' start_time=datetime.datetime(2024, 5, 7, 13, 11, 48, 276681) metadata={'cache_hit': False} input={'messages': 'redacted-by-litellm'} output={'content': 'redacted-by-litellm', 'role': 'assistant'} level=<ObservationLevel.DEFAULT: 'DEFAULT'> status_message=None parent_observation_id=None version=None id='time-13-11-48-276681_chatcmpl-ef076c31-4977-4687-bc83-07bb1f0aa1b2' end_time=datetime.datetime(2024, 5, 7, 13, 11, 48, 285026) completion_start_time=datetime.datetime(2024, 5, 7, 13, 11, 48, 278853) model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'} usage=Usage(input=0, output=98, total=None, unit=<ModelUsageUnit.TOKENS: 'TOKENS'>, input_cost=None, output_cost=None, total_cost=0.000196) prompt_name=None prompt_version=None...
+item size 400
+adding task {'id': '6bacab4d-822a-430f-85a9-4de1fa7ce259', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-d9136466-2e87-4afc-8367-dc51764251c7', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 48, 276681), 'metadata': {'cache_hit': False}, 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': <ObservationLevel.DEFAULT: 'DEFAULT'>, 'id': 'time-13-11-48-276681_chatcmpl-ef076c31-4977-4687-bc83-07bb1f0aa1b2', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 48, 285026), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 48, 278853), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 0, 'output': 98, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>, 'totalCost': 0.000196}}}
+item size 860
+uploading batch of 2 items
+uploading data: {'batch': [{'id': 'cab47524-1e1e-4404-b8bd-5f526895ac0c', 'type': 'trace-create', 'body': {'id': 'litellm-test-d9136466-2e87-4afc-8367-dc51764251c7', 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 48, 286447, tzinfo=datetime.timezone.utc), 'name': 'litellm-acompletion', 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'tags': ['cache_hit:False']}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 48, 286752, tzinfo=datetime.timezone.utc)}, {'id': '6bacab4d-822a-430f-85a9-4de1fa7ce259', 'type': 'generation-create', 'body': {'traceId': 'litellm-test-d9136466-2e87-4afc-8367-dc51764251c7', 'name': 'litellm-acompletion', 'startTime': datetime.datetime(2024, 5, 7, 13, 11, 48, 276681), 'metadata': {'cache_hit': False}, 'input': {'messages': 'redacted-by-litellm'}, 'output': {'content': 'redacted-by-litellm', 'role': 'assistant'}, 'level': <ObservationLevel.DEFAULT: 'DEFAULT'>, 'id': 'time-13-11-48-276681_chatcmpl-ef076c31-4977-4687-bc83-07bb1f0aa1b2', 'endTime': datetime.datetime(2024, 5, 7, 13, 11, 48, 285026), 'completionStartTime': datetime.datetime(2024, 5, 7, 13, 11, 48, 278853), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'stream': True, 'max_tokens': 5, 'user': 'langfuse_latency_test_user', 'extra_body': '{}'}, 'usage': {'input': 0, 'output': 98, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>, 'totalCost': 0.000196}}, 'timestamp': datetime.datetime(2024, 5, 7, 20, 11, 48, 287077, tzinfo=datetime.timezone.utc)}], 'metadata': {'batch_size': 2, 'sdk_integration': 'litellm', 'sdk_name': 'python', 'sdk_version': '2.27.0', 'public_key': 'pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66'}}
+making request: {"batch": [{"id": "cab47524-1e1e-4404-b8bd-5f526895ac0c", "type": "trace-create", "body": {"id": "litellm-test-d9136466-2e87-4afc-8367-dc51764251c7", "timestamp": "2024-05-07T20:11:48.286447Z", "name": "litellm-acompletion", "input": {"messages": "redacted-by-litellm"}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "tags": ["cache_hit:False"]}, "timestamp": "2024-05-07T20:11:48.286752Z"}, {"id": "6bacab4d-822a-430f-85a9-4de1fa7ce259", "type": "generation-create", "body": {"traceId": "litellm-test-d9136466-2e87-4afc-8367-dc51764251c7", "name": "litellm-acompletion", "startTime": "2024-05-07T13:11:48.276681-07:00", "metadata": {"cache_hit": false}, "input": {"messages": "redacted-by-litellm"}, "output": {"content": "redacted-by-litellm", "role": "assistant"}, "level": "DEFAULT", "id": "time-13-11-48-276681_chatcmpl-ef076c31-4977-4687-bc83-07bb1f0aa1b2", "endTime": "2024-05-07T13:11:48.285026-07:00", "completionStartTime": "2024-05-07T13:11:48.278853-07:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "stream": true, "max_tokens": 5, "user": "langfuse_latency_test_user", "extra_body": "{}"}, "usage": {"input": 0, "output": 98, "unit": "TOKENS", "totalCost": 0.000196}}, "timestamp": "2024-05-07T20:11:48.287077Z"}], "metadata": {"batch_size": 2, "sdk_integration": "litellm", "sdk_name": "python", "sdk_version": "2.27.0", "public_key": "pk-lf-47ddd17f-c73c-4edd-b92a-b28835843e66"}} to https://cloud.langfuse.com/api/public/ingestion
+received response: {"errors":[],"successes":[{"id":"cab47524-1e1e-4404-b8bd-5f526895ac0c","status":201},{"id":"6bacab4d-822a-430f-85a9-4de1fa7ce259","status":201}]}
+successfully uploaded batch of 2 items
+Getting observations... None, None, None, None, litellm-test-d9136466-2e87-4afc-8367-dc51764251c7, None, GENERATION
 joining 1 consumer threads
 consumer thread 0 joined
 joining 1 consumer threads
diff --git a/litellm/tests/test_alangfuse.py b/litellm/tests/test_alangfuse.py
index d71738cc6..5a495550c 100644
--- a/litellm/tests/test_alangfuse.py
+++ b/litellm/tests/test_alangfuse.py
@@ -21,7 +21,7 @@ import pytest
 
 
 @pytest.fixture
-def langfuse_client() -> "langfuse.Langfuse":
+def langfuse_client():
     import langfuse
 
     langfuse_client = langfuse.Langfuse(
@@ -29,9 +29,12 @@
         secret_key=os.environ["LANGFUSE_SECRET_KEY"],
     )
 
-    with patch("langfuse.Langfuse", MagicMock(return_value=langfuse_client)) as mock_langfuse_client:
+    with patch(
+        "langfuse.Langfuse", MagicMock(return_value=langfuse_client)
+    ) as mock_langfuse_client:
         yield mock_langfuse_client()
-
+
+
 def search_logs(log_file_path, num_good_logs=1):
     """
     Searches the given log file for logs containing the "/api/public" string.
@@ -143,7 +146,7 @@ def test_langfuse_logging_async():
         pytest.fail(f"An exception occurred - {e}")
 
 
-async def make_async_calls(metadata = None, **completion_kwargs):
+async def make_async_calls(metadata=None, **completion_kwargs):
    tasks = []
    for _ in range(5):
        tasks.append(create_async_task())
@@ -173,14 +176,14 @@ def create_async_task(**completion_kwargs):
    By default a standard set of arguments are used for the litellm.acompletion function.
""" completion_args = { - "model": "azure/chatgpt-v-2", - "messages": [{"role": "user", "content": "This is a test"}], - "max_tokens": 5, - "temperature": 0.7, - "timeout": 5, - "user": "langfuse_latency_test_user", - "mock_response": "It's simple to use and easy to get started", - } + "model": "azure/chatgpt-v-2", + "messages": [{"role": "user", "content": "This is a test"}], + "max_tokens": 5, + "temperature": 0.7, + "timeout": 5, + "user": "langfuse_latency_test_user", + "mock_response": "It's simple to use and easy to get started", + } completion_args.update(completion_kwargs) return asyncio.create_task(litellm.acompletion(**completion_args)) @@ -195,7 +198,11 @@ async def test_langfuse_logging_without_request_response(stream, langfuse_client litellm.set_verbose = True litellm.turn_off_message_logging = True litellm.success_callback = ["langfuse"] - response = await create_async_task(model="gpt-3.5-turbo", stream=stream, metadata={"trace_id": _unique_trace_name}) + response = await create_async_task( + model="gpt-3.5-turbo", + stream=stream, + metadata={"trace_id": _unique_trace_name}, + ) print(response) if stream: async for chunk in response: @@ -232,49 +239,78 @@ async def test_langfuse_logging_metadata(langfuse_client): Tags is just set for the trace """ import uuid - + litellm.set_verbose = True litellm.success_callback = ["langfuse"] - + trace_identifiers = {} - expected_filtered_metadata_keys = {"trace_name", "trace_id", "existing_trace_id", "trace_user_id", "session_id", "tags", "generation_name", "generation_id", "prompt"} - trace_metadata = {"trace_actual_metadata_key": "trace_actual_metadata_value"} # Allows for setting the metadata on the trace + expected_filtered_metadata_keys = { + "trace_name", + "trace_id", + "existing_trace_id", + "trace_user_id", + "session_id", + "tags", + "generation_name", + "generation_id", + "prompt", + } + trace_metadata = { + "trace_actual_metadata_key": "trace_actual_metadata_value" + } # Allows for setting the metadata on the trace run_id = str(uuid.uuid4()) session_id = f"litellm-test-session-{run_id}" trace_common_metadata = { "session_id": session_id, "tags": ["litellm-test-tag1", "litellm-test-tag2"], - "update_trace_keys": ["output", "trace_metadata"], # Overwrite the following fields in the trace with the last generation's output and the trace_user_id + "update_trace_keys": [ + "output", + "trace_metadata", + ], # Overwrite the following fields in the trace with the last generation's output and the trace_user_id "trace_metadata": trace_metadata, - "gen_metadata_key": "gen_metadata_value", # Metadata key that should not be filtered in the generation + "gen_metadata_key": "gen_metadata_value", # Metadata key that should not be filtered in the generation "trace_release": "litellm-test-release", "version": "litellm-test-version", } - for trace_num in range(1, 3): # Two traces + for trace_num in range(1, 3): # Two traces metadata = copy.deepcopy(trace_common_metadata) trace_id = f"litellm-test-trace{trace_num}-{run_id}" metadata["trace_id"] = trace_id metadata["trace_name"] = trace_id trace_identifiers[trace_id] = [] print(f"Trace: {trace_id}") - for generation_num in range(1, trace_num + 1): # Each trace has a number of generations equal to its trace number + for generation_num in range( + 1, trace_num + 1 + ): # Each trace has a number of generations equal to its trace number metadata["trace_user_id"] = f"litellm-test-user{generation_num}-{run_id}" - generation_id = f"litellm-test-trace{trace_num}-generation-{generation_num}-{run_id}" + 
generation_id = ( + f"litellm-test-trace{trace_num}-generation-{generation_num}-{run_id}" + ) metadata["generation_id"] = generation_id metadata["generation_name"] = generation_id - metadata["trace_metadata"]["generation_id"] = generation_id # Update to test if trace_metadata is overwritten by update trace keys + metadata["trace_metadata"][ + "generation_id" + ] = generation_id # Update to test if trace_metadata is overwritten by update trace keys trace_identifiers[trace_id].append(generation_id) print(f"Generation: {generation_id}") - response = await create_async_task(model="gpt-3.5-turbo", + response = await create_async_task( + model="gpt-3.5-turbo", mock_response=f"{session_id}:{trace_id}:{generation_id}", - messages=[{"role": "user", "content": f"{session_id}:{trace_id}:{generation_id}"}], + messages=[ + { + "role": "user", + "content": f"{session_id}:{trace_id}:{generation_id}", + } + ], max_tokens=100, temperature=0.2, - metadata=copy.deepcopy(metadata) # Every generation needs its own metadata, langfuse is not async/thread safe without it + metadata=copy.deepcopy( + metadata + ), # Every generation needs its own metadata, langfuse is not async/thread safe without it ) print(response) metadata["existing_trace_id"] = trace_id - + langfuse_client.flush() await asyncio.sleep(2) @@ -284,20 +320,31 @@ async def test_langfuse_logging_metadata(langfuse_client): assert trace.id == trace_id assert trace.session_id == session_id assert trace.metadata != trace_metadata - generations = list(reversed(langfuse_client.get_generations(trace_id=trace_id).data)) + generations = list( + reversed(langfuse_client.get_generations(trace_id=trace_id).data) + ) assert len(generations) == len(generation_ids) - assert trace.input == generations[0].input # Should be set by the first generation - assert trace.output == generations[-1].output # Should be overwritten by the last generation according to update_trace_keys - assert trace.metadata != generations[-1].metadata # Should be overwritten by the last generation according to update_trace_keys + assert ( + trace.input == generations[0].input + ) # Should be set by the first generation + assert ( + trace.output == generations[-1].output + ) # Should be overwritten by the last generation according to update_trace_keys + assert ( + trace.metadata != generations[-1].metadata + ) # Should be overwritten by the last generation according to update_trace_keys assert trace.metadata["generation_id"] == generations[-1].id assert set(trace.tags).issuperset(trace_common_metadata["tags"]) print("trace_from_langfuse", trace) for generation_id, generation in zip(generation_ids, generations): assert generation.id == generation_id assert generation.trace_id == trace_id - assert set(generation.metadata.keys()).isdisjoint(expected_filtered_metadata_keys) + assert set(generation.metadata.keys()).isdisjoint( + expected_filtered_metadata_keys + ) print("generation_from_langfuse", generation) + @pytest.mark.skip(reason="beta test - checking langfuse output") def test_langfuse_logging(): try: @@ -657,7 +704,10 @@ def test_langfuse_existing_trace_id(): assert initial_langfuse_trace_dict == new_langfuse_trace_dict -@pytest.mark.skipif(condition=not os.environ.get("OPENAI_API_KEY", False), reason="Authentication missing for openai") +@pytest.mark.skipif( + condition=not os.environ.get("OPENAI_API_KEY", False), + reason="Authentication missing for openai", +) def test_langfuse_logging_tool_calling(): litellm.set_verbose = True
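
For reference, the metadata conventions exercised by test_langfuse_logging_metadata above can be driven from caller code roughly as follows. This is a minimal sketch, not part of the patch: the identifiers ("demo-session", "demo-trace", etc.) are placeholders, and it assumes LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY are set in the environment.

    import copy
    import litellm

    # route successful calls to the Langfuse logger
    litellm.success_callback = ["langfuse"]

    metadata = {
        "session_id": "demo-session",          # groups traces into one Langfuse session
        "trace_id": "demo-trace",              # trace to create (or update via existing_trace_id)
        "trace_name": "demo-trace",            # skipped when existing_trace_id is set
        "trace_user_id": "demo-user",          # becomes the trace's user id
        "generation_name": "demo-generation",  # name of this generation
        "generation_id": "demo-generation",    # id of this generation
        "tags": ["demo-tag"],                  # set on the trace
        "update_trace_keys": ["output"],       # trace fields the latest generation overwrites
    }

    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "This is a test"}],
        mock_response="It's simple to use and easy to get started",
        # per the test comment above, give each call its own copy of the
        # metadata dict; langfuse is not async/thread safe when it is shared
        metadata=copy.deepcopy(metadata),
    )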