forked from phoenix/litellm-mirror
add users
commit dc8ae16d84 (parent c397da7c59)
4 changed files with 2406 additions and 1412 deletions
litellm/integrations/langfuse.py
@@ -33,7 +33,9 @@ class LangFuseLogger:
             debug=True,
         )

-    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
+    def log_event(
+        self, kwargs, response_obj, start_time, end_time, user_id, print_verbose
+    ):
         # Method definition

         try:
@@ -64,6 +66,7 @@ class LangFuseLogger:
             output = response_obj["choices"][0]["message"].json()

             self._log_langfuse_v2(
+                user_id,
                 metadata,
                 output,
                 start_time,
@@ -73,6 +76,7 @@ class LangFuseLogger:
                 input,
                 response_obj,
             ) if self._is_langfuse_v2() else self._log_langfuse_v1(
+                user_id,
                 metadata,
                 output,
                 start_time,
@@ -93,9 +97,11 @@ class LangFuseLogger:
             pass

     async def _async_log_event(
-        self, kwargs, response_obj, start_time, end_time, print_verbose
+        self, kwargs, response_obj, start_time, end_time, user_id, print_verbose
     ):
-        self.log_event(kwargs, response_obj, start_time, end_time, print_verbose)
+        self.log_event(
+            kwargs, response_obj, start_time, end_time, user_id, print_verbose
+        )

     def _is_langfuse_v2(self):
         import langfuse
@@ -104,6 +110,7 @@ class LangFuseLogger:

     def _log_langfuse_v1(
         self,
+        user_id,
         metadata,
         output,
         start_time,
@@ -120,6 +127,7 @@ class LangFuseLogger:
             name=metadata.get("generation_name", "litellm-completion"),
             input=input,
             output=output,
+            userId=user_id,
         )
     )

@@ -142,6 +150,7 @@ class LangFuseLogger:

     def _log_langfuse_v2(
         self,
+        user_id,
         metadata,
         output,
         start_time,
@@ -155,6 +164,7 @@ class LangFuseLogger:
             name=metadata.get("generation_name", "litellm-completion"),
             input=input,
             output=output,
+            user_id=user_id,
         )

         trace.generation(
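For orientation, here is a minimal sketch of the v2 logging path after this commit. It is not the repository's exact code: the `langfuse_client` instance and the surrounding scaffolding are assumptions; only the `user_id` threading and the trace/generation split are taken from the hunks above (Langfuse's v2 Python SDK exposes both `trace(...)` and `trace.generation(...)`).

    # Hedged sketch of the v2 path after this commit. `langfuse_client` and the
    # exact argument set are assumptions; the user_id threading mirrors the diff.
    from langfuse import Langfuse

    langfuse_client = Langfuse()  # reads LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY

    def _log_langfuse_v2(user_id, metadata, output, start_time, end_time, input, response_obj):
        # The trace now records which end user made the call (snake_case in v2).
        trace = langfuse_client.trace(
            name=metadata.get("generation_name", "litellm-completion"),
            input=input,
            output=output,
            user_id=user_id,
        )
        # The generation hangs off the same trace, as in the final hunk above.
        trace.generation(
            name=metadata.get("generation_name", "litellm-completion"),
            start_time=start_time,
            end_time=end_time,
            model=response_obj.get("model"),
            input=input,
            output=output,
        )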
@@ -1,37 +1,2 @@
-Using selector: KqueueSelector
-consumer is running...
-Starting new HTTPS connection (1): litellm-logging.onrender.com:443
-Request options: {'method': 'post', 'url': '/chat/completions', 'files': None, 'json_data': {'messages': [{'role': 'user', 'content': 'This is a test'}], 'model': 'gpt-3.5-turbo', 'max_tokens': 100, 'temperature': 0.7}}
-connect_tcp.started host='api.openai.com' port=443 local_address=None timeout=5.0 socket_options=None
-connect_tcp.complete return_value=<httpcore._backends.anyio.AnyIOStream object at 0x10ce3f850>
-start_tls.started ssl_context=<ssl.SSLContext object at 0x10ca89340> server_hostname='api.openai.com' timeout=5.0
-start_tls.complete return_value=<httpcore._backends.anyio.AnyIOStream object at 0x10ce3f8b0>
-send_request_headers.started request=<Request [b'POST']>
-send_request_headers.complete
-send_request_body.started request=<Request [b'POST']>
-send_request_body.complete
-receive_response_headers.started request=<Request [b'POST']>
-https://litellm-logging.onrender.com:443 "POST /logging HTTP/1.1" 200 38
-receive_response_headers.complete return_value=(b'HTTP/1.1', 200, b'OK', [(b'Date', b'Mon, 18 Dec 2023 21:53:36 GMT'), (b'Content-Type', b'application/json'), (b'Transfer-Encoding', b'chunked'), (b'Connection', b'keep-alive'), (b'access-control-allow-origin', b'*'), (b'Cache-Control', b'no-cache, must-revalidate'), (b'openai-model', b'gpt-3.5-turbo-0613'), (b'openai-organization', b'finto-technologies'), (b'openai-processing-ms', b'314'), (b'openai-version', b'2020-10-01'), (b'strict-transport-security', b'max-age=15724800; includeSubDomains'), (b'x-ratelimit-limit-requests', b'5000'), (b'x-ratelimit-limit-tokens', b'160000'), (b'x-ratelimit-limit-tokens_usage_based', b'160000'), (b'x-ratelimit-remaining-requests', b'4999'), (b'x-ratelimit-remaining-tokens', b'159895'), (b'x-ratelimit-remaining-tokens_usage_based', b'159895'), (b'x-ratelimit-reset-requests', b'12ms'), (b'x-ratelimit-reset-tokens', b'39ms'), (b'x-ratelimit-reset-tokens_usage_based', b'39ms'), (b'x-request-id', b'798c68979c33c09835370164b9c3a523'), (b'CF-Cache-Status', b'DYNAMIC'), (b'Set-Cookie', b'__cf_bm=CbrXQ9eH3xFyKA4RzW3z3LlpLb_1pGPWeFTYPtWcE50-1702936416-1-ASb/OMcdGX68dHUk+/wA7xDISru2gTUlUJCwGntKnQ58aBvxa5I6ws5xiY6cXyT8hm9s5bX09Q4Tdb/b85w3rFs=; path=/; expires=Mon, 18-Dec-23 22:23:36 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Set-Cookie', b'_cfuvid=hNImRsjGg2JqU2MW6VYVAAMPGT99ADf9XOKBz5pJix0-1702936416944-0-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None'), (b'Server', b'cloudflare'), (b'CF-RAY', b'837aa3b94aa51655-WAW'), (b'Content-Encoding', b'gzip'), (b'alt-svc', b'h3=":443"; ma=86400')])
-receive_response_body.started request=<Request [b'POST']>
-receive_response_body.complete
-response_closed.started
-response_closed.complete
-HTTP Request: POST https://api.openai.com/v1/chat/completions "200 OK"
-Creating trace id='5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed' name='litellm-completion' user_id=None input=[[{'role': 'user', 'content': 'This is a test'}]] output={'content': 'Great! What would you like to test?', 'role': 'assistant'} session_id=None release=None version=None metadata=None public=None
-adding task {'id': '3ce30ace-129e-4a4d-b9db-ed42cdfc5bc5', 'type': 'trace-create', 'body': {'id': '5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed', 'name': 'litellm-completion', 'input': [[{'role': 'user', 'content': 'This is a test'}]], 'output': {'content': 'Great! What would you like to test?', 'role': 'assistant'}}}
-Creating generation trace_id='5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed' name='litellm-completion' start_time=datetime.datetime(2023, 12, 18, 22, 53, 35, 556108) metadata={} input=[[{'role': 'user', 'content': 'This is a test'}]] output={'content': 'Great! What would you like to test?', 'role': 'assistant'} level=None status_message=None parent_observation_id=None version=None id='215b1635-46e3-4791-878b-6d76213b8559' end_time=datetime.datetime(2023, 12, 18, 22, 53, 36, 522751) completion_start_time=None model='gpt-3.5-turbo' model_parameters={'temperature': '0.7', 'max_tokens': 100} usage=Usage(input=11, output=9, total=None, unit=<ModelUsageUnit.TOKENS: 'TOKENS'>)...
-item size 348
-adding task {'id': '361e8f67-f46f-42ce-bf9b-e5aab7c5aa38', 'type': 'generation-create', 'body': {'traceId': '5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed', 'name': 'litellm-completion', 'startTime': datetime.datetime(2023, 12, 18, 22, 53, 35, 556108), 'metadata': {}, 'input': [[{'role': 'user', 'content': 'This is a test'}]], 'output': {'content': 'Great! What would you like to test?', 'role': 'assistant'}, 'id': '215b1635-46e3-4791-878b-6d76213b8559', 'endTime': datetime.datetime(2023, 12, 18, 22, 53, 36, 522751), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'max_tokens': 100}, 'usage': {'input': 11, 'output': 9, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>}}}
-flushing queue
-item size 659
-uploading batch of 2 items
-uploading data: {'batch': [{'id': '3ce30ace-129e-4a4d-b9db-ed42cdfc5bc5', 'type': 'trace-create', 'body': {'id': '5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed', 'name': 'litellm-completion', 'input': [[{'role': 'user', 'content': 'This is a test'}]], 'output': {'content': 'Great! What would you like to test?', 'role': 'assistant'}}, 'timestamp': datetime.datetime(2023, 12, 18, 21, 53, 36, 524507, tzinfo=tzutc())}, {'id': '361e8f67-f46f-42ce-bf9b-e5aab7c5aa38', 'type': 'generation-create', 'body': {'traceId': '5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed', 'name': 'litellm-completion', 'startTime': datetime.datetime(2023, 12, 18, 22, 53, 35, 556108), 'metadata': {}, 'input': [[{'role': 'user', 'content': 'This is a test'}]], 'output': {'content': 'Great! What would you like to test?', 'role': 'assistant'}, 'id': '215b1635-46e3-4791-878b-6d76213b8559', 'endTime': datetime.datetime(2023, 12, 18, 22, 53, 36, 522751), 'model': 'gpt-3.5-turbo', 'modelParameters': {'temperature': '0.7', 'max_tokens': 100}, 'usage': {'input': 11, 'output': 9, 'unit': <ModelUsageUnit.TOKENS: 'TOKENS'>}}, 'timestamp': datetime.datetime(2023, 12, 18, 21, 53, 36, 525388, tzinfo=tzutc())}], 'metadata': {'batch_size': 2, 'sdk_integration': 'default', 'sdk_name': 'python', 'sdk_version': '2.0.1', 'public_key': 'pk-lf-1234567890'}}
-making request: {"batch": [{"id": "3ce30ace-129e-4a4d-b9db-ed42cdfc5bc5", "type": "trace-create", "body": {"id": "5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed", "name": "litellm-completion", "input": [[{"role": "user", "content": "This is a test"}]], "output": {"content": "Great! What would you like to test?", "role": "assistant"}}, "timestamp": "2023-12-18T21:53:36.524507+00:00"}, {"id": "361e8f67-f46f-42ce-bf9b-e5aab7c5aa38", "type": "generation-create", "body": {"traceId": "5b723fd6-0b1b-4c0a-b254-29a6e1fe29ed", "name": "litellm-completion", "startTime": "2023-12-18T22:53:35.556108+00:00", "metadata": {}, "input": [[{"role": "user", "content": "This is a test"}]], "output": {"content": "Great! What would you like to test?", "role": "assistant"}, "id": "215b1635-46e3-4791-878b-6d76213b8559", "endTime": "2023-12-18T22:53:36.522751+00:00", "model": "gpt-3.5-turbo", "modelParameters": {"temperature": "0.7", "max_tokens": 100}, "usage": {"input": 11, "output": 9, "unit": "TOKENS"}}, "timestamp": "2023-12-18T21:53:36.525388+00:00"}], "metadata": {"batch_size": 2, "sdk_integration": "default", "sdk_name": "python", "sdk_version": "2.0.1", "public_key": "pk-lf-1234567890"}} to http://localhost:3000/api/public/ingestion
-Starting new HTTP connection (1): localhost:3000
-http://localhost:3000 "POST /api/public/ingestion HTTP/1.1" 207 145
-received response: {"errors":[],"successes":[{"id":"3ce30ace-129e-4a4d-b9db-ed42cdfc5bc5","status":201},{"id":"361e8f67-f46f-42ce-bf9b-e5aab7c5aa38","status":201}]}
-successfully uploaded batch of 2 items
-successfully flushed about 0 items.
-joining 1 consumer threads
-consumer thread 0 joined
-close.started
-close.complete
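The deleted log above captures one end-to-end run. A minimal sketch of the call that produces it, assuming OPENAI_API_KEY and the Langfuse keys are set in the environment (the final print line is illustrative, not from the log):

    # Reproduces the request recorded in the log above ('This is a test',
    # gpt-3.5-turbo, max_tokens=100, temperature=0.7).
    import litellm

    litellm.success_callback = ["langfuse"]  # send successful calls to Langfuse

    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "This is a test"}],
        max_tokens=100,
        temperature=0.7,
    )
    print(response.choices[0].message.content)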
litellm/tests/test_langfuse.py
@@ -105,6 +105,7 @@ def test_langfuse_logging_async():
             max_tokens=100,
             temperature=0.7,
             timeout=5,
+            user="test_user",
         )

     response = asyncio.run(_test_langfuse())
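For context, the async test around this hunk plausibly has the following shape; the helper name `_test_langfuse`, the keyword arguments, and the `asyncio.run(...)` call come from the diff, while the model and messages are assumptions:

    # Hypothetical shape of test_langfuse_logging_async's inner helper; only
    # the visible arguments and the asyncio.run call are taken from the diff.
    import asyncio
    import litellm

    async def _test_langfuse():
        return await litellm.acompletion(
            model="gpt-3.5-turbo",           # assumption
            messages=[{"role": "user", "content": "This is a test"}],  # assumption
            max_tokens=100,
            temperature=0.7,
            timeout=5,
            user="test_user",  # new: flows through to the Langfuse trace's user id
        )

    response = asyncio.run(_test_langfuse())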
@@ -198,7 +199,7 @@ def test_langfuse_logging_custom_generation_name():
         print(e)


-test_langfuse_logging_custom_generation_name()
+# test_langfuse_logging_custom_generation_name()


 @pytest.mark.skip(reason="beta test - checking langfuse output")
@@ -235,7 +236,7 @@ def test_langfuse_logging_function_calling():
         print(e)


-test_langfuse_logging_function_calling()
+# test_langfuse_logging_function_calling()


 def test_langfuse_logging_tool_calling():
@@ -296,4 +297,4 @@ def test_langfuse_logging_tool_calling():
     tool_calls = response.choices[0].message.tool_calls


-test_langfuse_logging_tool_calling()
+# test_langfuse_logging_tool_calling()
litellm/utils.py (2272 changes)
File diff suppressed because it is too large