fix(utils.py): fix logging for text completion streaming
commit 9b46412279
parent b4d624f332

2 changed files with 42 additions and 1 deletion
@@ -545,6 +545,45 @@ async def test_async_chat_bedrock_stream():
 
 # asyncio.run(test_async_chat_bedrock_stream())
 
+# Text Completion
+
+## Test OpenAI text completion + Async
+@pytest.mark.asyncio
+async def test_async_text_completion_openai_stream():
+    try:
+        customHandler = CompletionCustomHandler()
+        litellm.callbacks = [customHandler]
+        response = await litellm.atext_completion(
+            model="gpt-3.5-turbo",
+            prompt="Hi 👋 - i'm async text completion openai",
+        )
+        # test streaming
+        response = await litellm.atext_completion(
+            model="gpt-3.5-turbo",
+            prompt="Hi 👋 - i'm async text completion openai",
+            stream=True,
+        )
+        async for chunk in response:
+            print(f"chunk: {chunk}")
+            continue
+        ## test failure callback
+        try:
+            response = await litellm.atext_completion(
+                model="gpt-3.5-turbo",
+                prompt="Hi 👋 - i'm async text completion openai",
+                stream=True,
+                api_key="my-bad-key",
+            )
+            async for chunk in response:
+                continue
+        except:
+            pass
+        time.sleep(1)
+        print(f"customHandler.errors: {customHandler.errors}")
+        assert len(customHandler.errors) == 0
+        litellm.callbacks = []
+    except Exception as e:
+        pytest.fail(f"An exception occurred: {str(e)}")
 
 # EMBEDDING
 ## Test OpenAI + Async
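For context, the test relies on the `CompletionCustomHandler` defined earlier in this test file, which collects callback-side failures into an `errors` list. A minimal sketch of that pattern, assuming litellm's `CustomLogger` callback base class (the hook bodies here are illustrative, not the handler from this repo):

```python
import traceback

from litellm.integrations.custom_logger import CustomLogger


class CompletionCustomHandler(CustomLogger):
    # Sketch: record any exception raised inside a logging hook, so the
    # test can assert len(handler.errors) == 0 after the calls complete.
    def __init__(self):
        self.errors = []

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        try:
            # a real handler would type-check kwargs/response_obj here
            assert isinstance(kwargs, dict)
        except Exception:
            self.errors.append(traceback.format_exc())

    async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
        try:
            assert isinstance(kwargs, dict)
        except Exception:
            self.errors.append(traceback.format_exc())
```

The `assert len(customHandler.errors) == 0` check at the end of the test is what catches the logging bug this commit fixes: before the fix, the hooks would blow up on a text-completion prompt and the traceback would land in `errors`.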
@@ -710,7 +710,7 @@ class CallTypes(Enum):
     aimage_generation = "aimage_generation"
 
 
-# Logging function -> log the exact model details + what's being sent | Non-Blocking
+# Logging function -> log the exact model details + what's being sent | Non-BlockingP
 class Logging:
     global supabaseClient, liteDebuggerClient, promptLayerLogger, weightsBiasesLogger, langsmithLogger, capture_exception, add_breadcrumb, llmonitorLogger
 
@@ -729,6 +729,8 @@ class Logging:
             raise ValueError(
                 f"Invalid call_type {call_type}. Allowed values: {allowed_values}"
             )
+        if messages is not None and isinstance(messages, str):
+            messages = [{"role": "user", "content": messages}] # convert text completion input to the chat completion format
         self.model = model
         self.messages = messages
         self.stream = stream
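The two added lines in `Logging.__init__` are the actual fix: `atext_completion` hands the logger a raw prompt string as `messages`, while the downstream callback code expects the chat-completion list format. A minimal standalone sketch of that normalization (the helper name is illustrative, not litellm API):

```python
def normalize_to_chat_format(messages):
    # mirror of the added branch: a bare text-completion prompt string
    # becomes a single chat-format user message before logging
    if messages is not None and isinstance(messages, str):
        messages = [{"role": "user", "content": messages}]
    return messages


print(normalize_to_chat_format("Hi - i'm async text completion openai"))
# [{'role': 'user', 'content': "Hi - i'm async text completion openai"}]
```

With this in place, streaming text-completion calls log the same `[{"role": ..., "content": ...}]` shape as chat completions, which is what the streaming and failure-callback assertions in the test above exercise.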