forked from phoenix/litellm-mirror

fix ollama tests comments

parent 29509a48f8
commit 7edaff6198

1 changed file with 54 additions and 54 deletions
@@ -44,24 +44,24 @@

# test_completion_ollama_with_api_base()

# # def test_completion_ollama_stream():
# #     user_message = "what is litellm?"
# #     messages = [{ "content": user_message,"role": "user"}]
# #     try:
# #         response = completion(
# #             model="ollama/llama2",
# #             messages=messages,
# #             stream=True
# #         )
# #         print(response)
# #         for chunk in response:
# #             print(chunk)
# #             # print(chunk['choices'][0]['delta'])
# def test_completion_ollama_stream():
#     user_message = "what is litellm?"
#     messages = [{ "content": user_message,"role": "user"}]
#     try:
#         response = completion(
#             model="ollama/llama2",
#             messages=messages,
#             stream=True
#         )
#         print(response)
#         for chunk in response:
#             print(chunk)
#             # print(chunk['choices'][0]['delta'])

# #     except Exception as e:
# #         pytest.fail(f"Error occurred: {e}")
#     except Exception as e:
#         pytest.fail(f"Error occurred: {e}")

# # test_completion_ollama_stream()
# test_completion_ollama_stream()


# def test_completion_ollama_custom_prompt_template():
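The hunk above un-nests the commented-out synchronous streaming test. For reference, a minimal runnable sketch of that test, assuming an Ollama server listening on the default http://localhost:11434 with the llama2 model pulled; litellm.completion with stream=True returns an iterator of chunks:

import pytest
from litellm import completion

def test_completion_ollama_stream():
    # streaming completion against a locally running Ollama instance (assumption)
    messages = [{"content": "what is litellm?", "role": "user"}]
    try:
        response = completion(
            model="ollama/llama2",
            messages=messages,
            stream=True,
        )
        for chunk in response:
            # each chunk carries an incremental piece of the streamed response
            print(chunk)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")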
@@ -93,49 +93,49 @@

# test_completion_ollama_custom_prompt_template()

# # async def test_completion_ollama_async_stream():
# #     user_message = "what is the weather"
# #     messages = [{ "content": user_message,"role": "user"}]
# #     try:
# #         response = await litellm.acompletion(
# #             model="ollama/llama2",
# #             messages=messages,
# #             api_base="http://localhost:11434",
# #             stream=True
# #         )
# #         async for chunk in response:
# #             print(chunk)
# async def test_completion_ollama_async_stream():
#     user_message = "what is the weather"
#     messages = [{ "content": user_message,"role": "user"}]
#     try:
#         response = await litellm.acompletion(
#             model="ollama/llama2",
#             messages=messages,
#             api_base="http://localhost:11434",
#             stream=True
#         )
#         async for chunk in response:
#             print(chunk)

# #             # print(chunk['choices'][0]['delta'])
#             # print(chunk['choices'][0]['delta'])

# #     except Exception as e:
# #         pytest.fail(f"Error occurred: {e}")
#     except Exception as e:
#         pytest.fail(f"Error occurred: {e}")

# # # import asyncio
# # # asyncio.run(test_completion_ollama_async_stream())

# # def prepare_messages_for_chat(text: str) -> list:
# #     messages = [
# #         {"role": "user", "content": text},
# #     ]
# #     return messages


# # async def ask_question():
# #     params = {
# #         "messages": prepare_messages_for_chat("What is litellm? tell me 10 things about it who is sihaan.write an essay"),
# #         "api_base": "http://localhost:11434",
# #         "model": "ollama/llama2",
# #         "stream": True,
# #     }
# #     response = await litellm.acompletion(**params)
# #     return response

# # async def main():
# #     response = await ask_question()
# #     async for chunk in response:
# #         print(chunk)

# # if __name__ == "__main__":
# #     import asyncio
# #     asyncio.run(main())
# # asyncio.run(test_completion_ollama_async_stream())

# def prepare_messages_for_chat(text: str) -> list:
#     messages = [
#         {"role": "user", "content": text},
#     ]
#     return messages


# async def ask_question():
#     params = {
#         "messages": prepare_messages_for_chat("What is litellm? tell me 10 things about it who is sihaan.write an essay"),
#         "api_base": "http://localhost:11434",
#         "model": "ollama/llama2",
#         "stream": True,
#     }
#     response = await litellm.acompletion(**params)
#     return response

# async def main():
#     response = await ask_question()
#     async for chunk in response:
#         print(chunk)

# if __name__ == "__main__":
#     import asyncio
#     asyncio.run(main())
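The second hunk applies the same comment fix to the async variant and its driver helpers. A minimal sketch of running that path end to end, assuming the same local Ollama server at http://localhost:11434; litellm.acompletion(**params) with stream=True returns an object that can be consumed with async for:

import asyncio
import litellm

def prepare_messages_for_chat(text: str) -> list:
    # wrap a plain string as a single-turn chat message list
    return [{"role": "user", "content": text}]

async def ask_question():
    params = {
        "messages": prepare_messages_for_chat("what is litellm?"),
        "api_base": "http://localhost:11434",  # assumed local Ollama endpoint
        "model": "ollama/llama2",
        "stream": True,
    }
    # async, streaming completion routed to the local Ollama server
    return await litellm.acompletion(**params)

async def main():
    response = await ask_question()
    async for chunk in response:
        print(chunk)

if __name__ == "__main__":
    asyncio.run(main())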