(test) add ollama testing

ishaan-jaff 2023-10-11 17:00:33 -07:00
parent fabad3dc42
commit a1c9c80031


@@ -1,5 +1,5 @@
-# # ##### THESE TESTS CAN ONLY RUN LOCALLY WITH THE OLLAMA SERVER RUNNING ######
-# # # https://ollama.ai/
+# ##### THESE TESTS CAN ONLY RUN LOCALLY WITH THE OLLAMA SERVER RUNNING ######
+# # https://ollama.ai/
# import sys, os
# import traceback
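The whole file is commented out precisely because it needs a live server. A small sketch of how such tests could instead be auto-skipped when the server is down (the helper is hypothetical and not part of this commit; 11434 is Ollama's default port):

```python
# Hypothetical helper, not part of this commit: probe the local Ollama
# server and build a pytest skip marker from the result.
import urllib.request

import pytest


def ollama_is_running(api_base: str = "http://localhost:11434") -> bool:
    # a running Ollama server answers a plain GET / with 200
    try:
        with urllib.request.urlopen(api_base, timeout=2) as resp:
            return resp.status == 200
    except Exception:
        return False


requires_ollama = pytest.mark.skipif(
    not ollama_is_running(), reason="local Ollama server is not running"
)
```

Decorating each test with @requires_ollama would then let the suite run unmodified whether or not a server is up.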
@@ -44,30 +44,11 @@
# test_completion_ollama_with_api_base()
# def test_completion_ollama_stream():
# user_message = "what is litellm?"
# messages = [{"content": user_message, "role": "user"}]
# try:
# response = completion(
# model="ollama/llama2",
# messages=messages,
# stream=True
# )
# print(response)
# for chunk in response:
# print(chunk)
# # print(chunk['choices'][0]['delta'])
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
# test_completion_ollama_stream()
# def test_completion_ollama_custom_prompt_template():
# user_message = "what is litellm?"
# litellm.register_prompt_template(
# model="llama2",
# model="ollama/llama2",
# roles={
# "system": {"pre_message": "System: "},
# "user": {"pre_message": "User: "},
@@ -104,15 +85,23 @@
# stream=True
# )
# async for chunk in response:
# print(chunk)
-# # print(chunk['choices'][0]['delta'])
+# print(chunk['choices'][0]['delta'])
# print("TEST ASYNC NON Stream")
# response = await litellm.acompletion(
# model="ollama/llama2",
# messages=messages,
# api_base="http://localhost:11434",
# )
# print(response)
# except Exception as e:
# pytest.fail(f"Error occurred: {e}")
-# # import asyncio
-# # asyncio.run(test_completion_ollama_async_stream())
+# import asyncio
+# asyncio.run(test_completion_ollama_async_stream())
# def prepare_messages_for_chat(text: str) -> list:
# messages = [
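This hunk enables the async test, which drives the same model through litellm.acompletion both with and without streaming. A standalone sketch under the same local-server assumption:

```python
# Standalone version of the async calls in the test above. The explicit
# api_base pointing at Ollama's default port is taken from the test itself.
import asyncio

import litellm


async def main():
    # streaming call: chunks arrive as OpenAI-style deltas
    stream = await litellm.acompletion(
        model="ollama/llama2",
        messages=[{"content": "what is litellm?", "role": "user"}],
        api_base="http://localhost:11434",
        stream=True,
    )
    async for chunk in stream:
        print(chunk["choices"][0]["delta"])

    # plain call: the full response comes back at once
    response = await litellm.acompletion(
        model="ollama/llama2",
        messages=[{"content": "what is litellm?", "role": "user"}],
        api_base="http://localhost:11434",
    )
    print(response)


asyncio.run(main())
```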
@@ -143,6 +132,30 @@
# )
# print("response", response)
# def test_completion_expect_error():
# # this tests whether litellm maps ollama errors to the right exceptions
# print("making ollama request")
# # litellm.set_verbose=True
# user_message = "what is litellm?"
# messages = [{"content": user_message, "role": "user"}]
# try:
# response = completion(
# model="ollama/invalid",
# messages=messages,
# stream=True
# )
# print(response)
# for chunk in response:
# print(chunk)
# # print(chunk['choices'][0]['delta'])
# except Exception as e:
# pass  # the invalid model is expected to raise, so swallow the error
# test_completion_expect_error()
# if __name__ == "__main__":
# import asyncio
# asyncio.run(main())
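The new test_completion_expect_error block checks that a bad model name surfaces as a raised exception rather than a silent failure. A tighter sketch of the same intent using pytest.raises (an assertion style of my choosing, not the commit's):

```python
# Sketch of the exception-mapping check: a model that is not pulled
# locally should raise once the request (or the stream) is consumed.
import pytest
from litellm import completion


def test_invalid_ollama_model_raises():
    with pytest.raises(Exception):
        response = completion(
            model="ollama/invalid",
            messages=[{"content": "what is litellm?", "role": "user"}],
            stream=True,
        )
        # streaming errors may only surface while iterating
        for chunk in response:
            pass
```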