Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 19:24:27 +00:00)
test: add more logging for failing test
This commit is contained in:
parent 84c7a5b693
commit d07c813ef9

2 changed files with 8 additions and 2 deletions
@@ -180,11 +180,12 @@ def completion(
             "headers": headers,
         },
     )
+    print_verbose(f"_is_function_call: {_is_function_call}")
     ## COMPLETION CALL
     if (
         stream is not None and stream == True and _is_function_call == False
     ):  # if function call - fake the streaming (need complete blocks for output parsing in openai format)
+        print_verbose(f"makes anthropic streaming POST request")
         data["stream"] = stream
         response = requests.post(
             api_base,
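For readers tracing the first hunk: the gate only issues a real streaming POST when the caller asked for streaming and no function call is involved, because function-call output parsing needs the complete response blocks. A minimal standalone sketch of that branch, assuming requests-style HTTP; post_completion and its signature are hypothetical, not litellm's API:

import requests

def post_completion(api_base: str, headers: dict, data: dict,
                    stream, is_function_call: bool):
    # Stream only when streaming was requested AND this is not a function
    # call (function-call parsing needs the complete blocks).
    if stream is not None and stream == True and is_function_call == False:
        data["stream"] = stream
        # Real streaming request; callers iterate response.iter_lines().
        return requests.post(api_base, headers=headers, json=data, stream=True)
    # Blocking request; a fake stream can be built from the full response.
    return requests.post(api_base, headers=headers, json=data)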
@@ -289,6 +290,9 @@ def completion(
         completion_stream = model_response_iterator(
             model_response=streaming_model_response
         )
+        print_verbose(
+            f"Returns anthropic CustomStreamWrapper with 'cached_response' streaming object"
+        )
         return CustomStreamWrapper(
             completion_stream=completion_stream,
             model=model,
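The second hunk sits on the fake-streaming path: the complete (cached) response is wrapped in an iterator so downstream code can consume it like a live stream. The diff does not show model_response_iterator itself; under that caveat, a minimal sketch of what such an iterator could look like:

def model_response_iterator(model_response):
    # Yield the already-complete response as a single-chunk "stream".
    yield model_response

# Hypothetical usage, mirroring the hunk above:
# completion_stream = model_response_iterator(model_response=streaming_model_response)
# wrapper = CustomStreamWrapper(completion_stream=completion_stream, model=model, ...)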
@@ -9373,7 +9373,9 @@ class CustomStreamWrapper:
             else:
                 chunk = next(self.completion_stream)
             if chunk is not None and chunk != b"":
-                print_verbose(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}")
+                print_verbose(
+                    f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; custom_llm_provider: {self.custom_llm_provider}"
+                )
                 response: Optional[ModelResponse] = self.chunk_creator(chunk=chunk)
                 print_verbose(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}")
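The logging added here brackets chunk parsing: the raw chunk is logged before chunk_creator runs (now including the provider name) and the parsed result is logged after. A standalone sketch of the same pattern; drain_stream and its parameters are assumed names, not litellm code:

from typing import Any, Callable, Iterable, Optional

def drain_stream(completion_stream: Iterable[Any],
                 chunk_creator: Callable[..., Optional[Any]],
                 custom_llm_provider: str,
                 log: Callable[[str], None] = print) -> list:
    # Log each raw chunk before parsing and the parsed result after,
    # skipping empty chunks, as in the hunk above.
    results = []
    for chunk in completion_stream:
        if chunk is None or chunk == b"":
            continue
        log(f"PROCESSED CHUNK PRE CHUNK CREATOR: {chunk}; "
            f"custom_llm_provider: {custom_llm_provider}")
        response = chunk_creator(chunk=chunk)  # provider-specific parsing
        log(f"PROCESSED CHUNK POST CHUNK CREATOR: {response}")
        if response is not None:
            results.append(response)
    return results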