Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 19:24:27 +00:00
fix tg ai streaming
parent 79bcb59e0b
commit 9a31de520a

3 changed files with 64 additions and 5 deletions
@@ -294,4 +294,20 @@ def test_petals():
         # pytest.fail(f"Error occurred: {e}")
+
+
+# import asyncio
+# def test_completion_together_ai_stream():
+#     try:
+#         response = completion(model="togethercomputer/llama-2-70b-chat", messages=messages, custom_llm_provider="together_ai", stream=True, max_tokens=200)
+#         print(response)
+#         asyncio.run(get_response(response))
+#         # print(string_response)
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
+
+
+# async def get_response(generator):
+#     async for elem in generator:
+#         print(elem)
+#     return
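For reference, an uncommented, runnable version of the test added (commented out) in this hunk might look like the sketch below. It reuses the completion() call exactly as written in the diff, assumes the streamed response can be consumed with "async for" as the original commented code implies, and substitutes a hypothetical messages list, since the diff does not show its definition.

# Hedged sketch of the commented-out together_ai streaming test above.
# Assumptions: the streamed response is an async generator (as the commented
# code implies), and `messages` is a hypothetical stand-in prompt.
import asyncio

import pytest
from litellm import completion

messages = [{"role": "user", "content": "Hey, how's it going?"}]  # hypothetical prompt


async def get_response(generator):
    # Print each streamed chunk as it arrives.
    async for elem in generator:
        print(elem)


def test_completion_together_ai_stream():
    try:
        response = completion(
            model="togethercomputer/llama-2-70b-chat",
            messages=messages,
            custom_llm_provider="together_ai",
            stream=True,
            max_tokens=200,
        )
        print(response)
        asyncio.run(get_response(response))
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")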