forked from phoenix/litellm-mirror
test(main.py): adding more logging
This commit is contained in:
parent
13776b1df7
commit
39aec43b86
2 changed files with 6 additions and 2 deletions
|
@ -15,7 +15,7 @@ import dotenv, traceback, random, asyncio, time, contextvars
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
import httpx
|
import httpx
|
||||||
import litellm
|
import litellm
|
||||||
|
from ._logging import verbose_logger
|
||||||
from litellm import ( # type: ignore
|
from litellm import ( # type: ignore
|
||||||
client,
|
client,
|
||||||
exception_type,
|
exception_type,
|
||||||
|
@ -3346,11 +3346,15 @@ def stream_chunk_builder(
|
||||||
):
|
):
|
||||||
model_response = litellm.ModelResponse()
|
model_response = litellm.ModelResponse()
|
||||||
### SORT CHUNKS BASED ON CREATED ORDER ##
|
### SORT CHUNKS BASED ON CREATED ORDER ##
|
||||||
|
print_verbose("Goes into checking if chunk has hidden created at param")
|
||||||
if chunks[0]._hidden_params.get("created_at", None):
|
if chunks[0]._hidden_params.get("created_at", None):
|
||||||
|
print_verbose("Chunks have a created at hidden param")
|
||||||
# Sort chunks based on created_at in ascending order
|
# Sort chunks based on created_at in ascending order
|
||||||
chunks = sorted(
|
chunks = sorted(
|
||||||
chunks, key=lambda x: x._hidden_params.get("created_at", float("inf"))
|
chunks, key=lambda x: x._hidden_params.get("created_at", float("inf"))
|
||||||
)
|
)
|
||||||
|
print_verbose("Chunks sorted")
|
||||||
|
|
||||||
# set hidden params from chunk to model_response
|
# set hidden params from chunk to model_response
|
||||||
if model_response is not None and hasattr(model_response, "_hidden_params"):
|
if model_response is not None and hasattr(model_response, "_hidden_params"):
|
||||||
model_response._hidden_params = chunks[0].get("_hidden_params", {})
|
model_response._hidden_params = chunks[0].get("_hidden_params", {})
|
||||||
|
|
|
@ -206,7 +206,7 @@ def test_azure_completion_stream():
|
||||||
# checks if the model response available in the async + stream callbacks is equal to the received response
|
# checks if the model response available in the async + stream callbacks is equal to the received response
|
||||||
customHandler2 = MyCustomHandler()
|
customHandler2 = MyCustomHandler()
|
||||||
litellm.callbacks = [customHandler2]
|
litellm.callbacks = [customHandler2]
|
||||||
litellm.set_verbose = False
|
litellm.set_verbose = True
|
||||||
messages = [
|
messages = [
|
||||||
{"role": "system", "content": "You are a helpful assistant."},
|
{"role": "system", "content": "You are a helpful assistant."},
|
||||||
{
|
{
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue