diff --git a/litellm/tests/test_ollama_local.py b/litellm/tests/test_ollama_local.py
index 9692ab844..3962287de 100644
--- a/litellm/tests/test_ollama_local.py
+++ b/litellm/tests/test_ollama_local.py
@@ -44,24 +44,24 @@
 # test_completion_ollama_with_api_base()
 
-# # def test_completion_ollama_stream():
-# #     user_message = "what is litellm?"
-# #     messages = [{ "content": user_message,"role": "user"}]
-# #     try:
-# #         response = completion(
-# #             model="ollama/llama2",
-# #             messages=messages,
-# #             stream=True
-# #         )
-# #         print(response)
-# #         for chunk in response:
-# #             print(chunk)
-# #             # print(chunk['choices'][0]['delta'])
+# def test_completion_ollama_stream():
+#     user_message = "what is litellm?"
+#     messages = [{ "content": user_message,"role": "user"}]
+#     try:
+#         response = completion(
+#             model="ollama/llama2",
+#             messages=messages,
+#             stream=True
+#         )
+#         print(response)
+#         for chunk in response:
+#             print(chunk)
+#             # print(chunk['choices'][0]['delta'])
 
-# #     except Exception as e:
-# #         pytest.fail(f"Error occurred: {e}")
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
 
-# # test_completion_ollama_stream()
+# test_completion_ollama_stream()
 
 
 # def test_completion_ollama_custom_prompt_template():
@@ -93,49 +93,49 @@
 # test_completion_ollama_custom_prompt_template()
 
-# # async def test_completion_ollama_async_stream():
-# #     user_message = "what is the weather"
-# #     messages = [{ "content": user_message,"role": "user"}]
-# #     try:
-# #         response = await litellm.acompletion(
-# #             model="ollama/llama2",
-# #             messages=messages,
-# #             api_base="http://localhost:11434",
-# #             stream=True
-# #         )
-# #         async for chunk in response:
-# #             print(chunk)
+# async def test_completion_ollama_async_stream():
+#     user_message = "what is the weather"
+#     messages = [{ "content": user_message,"role": "user"}]
+#     try:
+#         response = await litellm.acompletion(
+#             model="ollama/llama2",
+#             messages=messages,
+#             api_base="http://localhost:11434",
+#             stream=True
+#         )
+#         async for chunk in response:
+#             print(chunk)
 
-# #             # print(chunk['choices'][0]['delta'])
+#             # print(chunk['choices'][0]['delta'])
 
-# #     except Exception as e:
-# #         pytest.fail(f"Error occurred: {e}")
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
 
-# # # import asyncio
-# # # asyncio.run(test_completion_ollama_async_stream())
+# # import asyncio
+# # asyncio.run(test_completion_ollama_async_stream())
 
-# # def prepare_messages_for_chat(text: str) -> list:
-# #     messages = [
-# #         {"role": "user", "content": text},
-# #     ]
-# #     return messages
+# def prepare_messages_for_chat(text: str) -> list:
+#     messages = [
+#         {"role": "user", "content": text},
+#     ]
+#     return messages
 
-# # async def ask_question():
-# #     params = {
-# #         "messages": prepare_messages_for_chat("What is litellm? tell me 10 things about it who is sihaan.write an essay"),
-# #         "api_base": "http://localhost:11434",
-# #         "model": "ollama/llama2",
-# #         "stream": True,
-# #     }
-# #     response = await litellm.acompletion(**params)
-# #     return response
+# async def ask_question():
+#     params = {
+#         "messages": prepare_messages_for_chat("What is litellm? tell me 10 things about it who is sihaan.write an essay"),
+#         "api_base": "http://localhost:11434",
+#         "model": "ollama/llama2",
+#         "stream": True,
+#     }
+#     response = await litellm.acompletion(**params)
+#     return response
 
-# # async def main():
-# #     response = await ask_question()
-# #     async for chunk in response:
-# #         print(chunk)
+# async def main():
+#     response = await ask_question()
+#     async for chunk in response:
+#         print(chunk)
 
-# # if __name__ == "__main__":
-# #     import asyncio
-# #     asyncio.run(main())
+# if __name__ == "__main__":
+#     import asyncio
+#     asyncio.run(main())
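For context, the commented-out streaming test above boils down to the following standalone call. This is a minimal sketch, assuming a local Ollama server at http://localhost:11434 with the llama2 model already pulled (the URL and model name come from the diff; running it outside pytest is our framing, not part of the patch):

    # Hypothetical standalone version of the streaming example shown in the diff.
    # Assumes an Ollama server is reachable at http://localhost:11434 and the
    # llama2 model has been pulled; otherwise litellm raises a connection error.
    import litellm

    response = litellm.completion(
        model="ollama/llama2",
        messages=[{"role": "user", "content": "what is litellm?"}],
        api_base="http://localhost:11434",
        stream=True,
    )

    # With stream=True, litellm yields OpenAI-style chunks; each chunk carries
    # a partial delta under chunk['choices'][0]['delta'].
    for chunk in response:
        print(chunk)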