forked from phoenix/litellm-mirror
fix(langfuse.py): serialize message for logging
parent 7856d65d44
commit f9dfeb502a

2 changed files with 13 additions and 5 deletions
@@ -58,7 +58,7 @@ class LangFuseLogger:
             model=kwargs['model'],
             modelParameters=optional_params,
             prompt=prompt,
-            completion=response_obj['choices'][0]['message'],
+            completion=response_obj['choices'][0]['message'].json(),
             usage=Usage(
                 prompt_tokens=response_obj['usage']['prompt_tokens'],
                 completion_tokens=response_obj['usage']['completion_tokens']
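
The completion field previously received litellm's Message object directly; the Langfuse client expects JSON-serializable data, so the new .json() call converts the message to a plain dict first. A minimal sketch of why that matters, assuming pydantic v2 and using a stand-in model rather than litellm's own Message class:

# Minimal sketch (not the litellm internals): a pydantic-style message object
# is not JSON-serializable as-is, but its dict form is.  Assumes pydantic v2.
import json
from pydantic import BaseModel

class FakeMessage(BaseModel):             # stand-in for litellm's Message
    role: str = "assistant"
    content: str = "hello"

msg = FakeMessage()

try:
    json.dumps(msg)                       # TypeError: not JSON serializable
except TypeError as err:
    print(f"raw object fails: {err}")

print(json.dumps(msg.model_dump()))       # works once converted to a plain dict
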
@@ -178,6 +178,14 @@ class Message(OpenAIObject):
         # Allow dictionary-style assignment of attributes
         setattr(self, key, value)

+    def json(self, **kwargs):
+        try:
+            return self.model_dump() # noqa
+        except:
+            # if using pydantic v1
+            return self.dict()
+
+
 class Delta(OpenAIObject):
     def __init__(self, content=None, role=None, **params):
         super(Delta, self).__init__(**params)
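
The new Message.json() helper is what makes the serialization above work on both pydantic major versions: model_dump() only exists in v2, so the except clause falls back to v1's dict(). A standalone sketch of the same pattern (to_dict is a hypothetical name used here for illustration; litellm's method is called json()):

# Sketch of the version-compatibility pattern behind Message.json():
# pydantic v2 exposes model_dump(); v1 only has dict().
from pydantic import BaseModel

class Example(BaseModel):
    role: str = "assistant"
    content: str = "hi"

    def to_dict(self):                    # hypothetical name for this sketch
        try:
            return self.model_dump()      # pydantic v2
        except AttributeError:
            return self.dict()            # pydantic v1 fallback

print(Example().to_dict())                # {'role': 'assistant', 'content': 'hi'}
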
@@ -817,16 +825,17 @@ class Logging:
                 )
         # print(f"original response in success handler: {self.model_call_details['original_response']}")
         try:
             print_verbose(f"success callbacks: {litellm.success_callback}")
             ## BUILD COMPLETE STREAMED RESPONSE
             complete_streaming_response = None
             if self.stream == True and self.model_call_details.get("litellm_params", {}).get("acompletion", False) == True:
                 # if it's acompletion == True, chunks are built/appended in async_success_handler
+                self.streaming_chunks.append(result)
                 if result.choices[0].finish_reason is not None: # if it's the last chunk
                     complete_streaming_response = litellm.stream_chunk_builder(self.streaming_chunks, messages=self.model_call_details.get("messages", None))
             else:
                 # this is a completion() call
-                if self.stream:
+                if self.stream == True:
                     print_verbose("success callback - assembling complete streaming response")
                     if result.choices[0].finish_reason is not None: # if it's the last chunk
                         print_verbose(f"success callback - Got the very Last chunk. Assembling {self.streaming_chunks}")
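
For streamed calls the handler now appends every chunk to self.streaming_chunks and only assembles the full response once a chunk carries a non-None finish_reason. The sketch below illustrates that accumulate-then-assemble idea with a simplified stand-in for litellm.stream_chunk_builder; the chunk shape is assumed for illustration, not copied from litellm:

# Illustration of the accumulate-then-assemble pattern (simplified stand-in for
# litellm.stream_chunk_builder; the chunk shape here is assumed, not litellm's).
def assemble(chunks):
    # join the text deltas of every streamed chunk into one completion string
    return "".join(c["choices"][0]["delta"].get("content", "") for c in chunks)

streaming_chunks = []
for chunk in [
    {"choices": [{"delta": {"content": "Hel"}, "finish_reason": None}]},
    {"choices": [{"delta": {"content": "lo"}, "finish_reason": "stop"}]},
]:
    streaming_chunks.append(chunk)
    if chunk["choices"][0]["finish_reason"] is not None:    # last chunk
        print(assemble(streaming_chunks))                   # -> Hello
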
@@ -5766,14 +5775,13 @@ class CustomStreamWrapper:
                     if processed_chunk is None:
                         continue
                     ## LOGGING
+                    threading.Thread(target=self.logging_obj.success_handler, args=(processed_chunk,)).start() # log response
                     asyncio.create_task(self.logging_obj.async_success_handler(processed_chunk,))
                     return processed_chunk
                 raise StopAsyncIteration
             else: # temporary patch for non-aiohttp async calls
                 # example - boto3 bedrock llms
-                print_verbose(f"ENTERS __NEXT__ LOOP")
                 processed_chunk = next(self)
-                print_verbose(f"PROCESSED CHUNK IN __ANEXT__: {processed_chunk}")
                 asyncio.create_task(self.logging_obj.async_success_handler(processed_chunk,))
                 return processed_chunk
         except StopAsyncIteration:
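
In the async iterator the synchronous success_handler is now fired on a background thread alongside the asyncio task for the async handler, so a blocking logging callback does not stall the stream. A minimal sketch of that pattern with hypothetical handlers (not the litellm logging object):

# Sketch of running a blocking success handler off-thread while the async
# handler is scheduled on the event loop (hypothetical handlers, not litellm's).
import asyncio
import threading

def success_handler(chunk):               # blocking callback
    print(f"sync log: {chunk}")

async def async_success_handler(chunk):
    print(f"async log: {chunk}")

async def emit(chunk):
    # fire the blocking handler on its own thread so it cannot stall the stream
    threading.Thread(target=success_handler, args=(chunk,)).start()
    # schedule the async handler on the running loop
    asyncio.create_task(async_success_handler(chunk))
    await asyncio.sleep(0)                # let the scheduled task run before returning
    return chunk

asyncio.run(emit({"content": "hi"}))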