fix ollama tests comments

ishaan-jaff 2023-10-05 11:20:34 -07:00
parent 29509a48f8
commit 7edaff6198


@@ -44,24 +44,24 @@
 # test_completion_ollama_with_api_base()
-# # def test_completion_ollama_stream():
-# #     user_message = "what is litellm?"
-# #     messages = [{ "content": user_message,"role": "user"}]
-# #     try:
-# #         response = completion(
-# #             model="ollama/llama2",
-# #             messages=messages,
-# #             stream=True
-# #         )
-# #         print(response)
-# #         for chunk in response:
-# #             print(chunk)
-# #             # print(chunk['choices'][0]['delta'])
-# #     except Exception as e:
-# #         pytest.fail(f"Error occurred: {e}")
-# # test_completion_ollama_stream()
+# def test_completion_ollama_stream():
+#     user_message = "what is litellm?"
+#     messages = [{ "content": user_message,"role": "user"}]
+#     try:
+#         response = completion(
+#             model="ollama/llama2",
+#             messages=messages,
+#             stream=True
+#         )
+#         print(response)
+#         for chunk in response:
+#             print(chunk)
+#             # print(chunk['choices'][0]['delta'])
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
+# test_completion_ollama_stream()
 # def test_completion_ollama_custom_prompt_template():
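The hunk above only re-comments the disabled streaming test. For reference, a minimal standalone sketch of that test is shown below; it assumes litellm is installed and a local Ollama server is serving llama2, with the model name and prompt taken straight from the diff:

import pytest
from litellm import completion

def test_completion_ollama_stream():
    # same prompt as in the commented-out test above
    messages = [{"content": "what is litellm?", "role": "user"}]
    try:
        # stream=True makes completion() return an iterator of chunks
        response = completion(
            model="ollama/llama2",
            messages=messages,
            stream=True,
        )
        for chunk in response:
            print(chunk)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
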
@@ -93,49 +93,49 @@
 # test_completion_ollama_custom_prompt_template()
-# # async def test_completion_ollama_async_stream():
-# #     user_message = "what is the weather"
-# #     messages = [{ "content": user_message,"role": "user"}]
-# #     try:
-# #         response = await litellm.acompletion(
-# #             model="ollama/llama2",
-# #             messages=messages,
-# #             api_base="http://localhost:11434",
-# #             stream=True
-# #         )
-# #         async for chunk in response:
-# #             print(chunk)
-# #             # print(chunk['choices'][0]['delta'])
-# #     except Exception as e:
-# #         pytest.fail(f"Error occurred: {e}")
-# # # import asyncio
-# # # asyncio.run(test_completion_ollama_async_stream())
-# # def prepare_messages_for_chat(text: str) -> list:
-# #     messages = [
-# #         {"role": "user", "content": text},
-# #     ]
-# #     return messages
-# # async def ask_question():
-# #     params = {
-# #         "messages": prepare_messages_for_chat("What is litellm? tell me 10 things about it who is sihaan.write an essay"),
-# #         "api_base": "http://localhost:11434",
-# #         "model": "ollama/llama2",
-# #         "stream": True,
-# #     }
-# #     response = await litellm.acompletion(**params)
-# #     return response
-# # async def main():
-# #     response = await ask_question()
-# #     async for chunk in response:
-# #         print(chunk)
-# # if __name__ == "__main__":
-# #     import asyncio
-# #     asyncio.run(main())
+# async def test_completion_ollama_async_stream():
+#     user_message = "what is the weather"
+#     messages = [{ "content": user_message,"role": "user"}]
+#     try:
+#         response = await litellm.acompletion(
+#             model="ollama/llama2",
+#             messages=messages,
+#             api_base="http://localhost:11434",
+#             stream=True
+#         )
+#         async for chunk in response:
+#             print(chunk)
+#             # print(chunk['choices'][0]['delta'])
+#     except Exception as e:
+#         pytest.fail(f"Error occurred: {e}")
+# # import asyncio
+# # asyncio.run(test_completion_ollama_async_stream())
+# def prepare_messages_for_chat(text: str) -> list:
+#     messages = [
+#         {"role": "user", "content": text},
+#     ]
+#     return messages
+# async def ask_question():
+#     params = {
+#         "messages": prepare_messages_for_chat("What is litellm? tell me 10 things about it who is sihaan.write an essay"),
+#         "api_base": "http://localhost:11434",
+#         "model": "ollama/llama2",
+#         "stream": True,
+#     }
+#     response = await litellm.acompletion(**params)
+#     return response
+# async def main():
+#     response = await ask_question()
+#     async for chunk in response:
+#         print(chunk)
+# if __name__ == "__main__":
+#     import asyncio
+#     asyncio.run(main())
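Similarly, a minimal sketch of the async streaming flow from the second hunk, assuming litellm is installed and an Ollama server is reachable at http://localhost:11434 (model, prompt, and api_base taken from the diff):

import asyncio
import litellm

async def main():
    # acompletion with stream=True yields response chunks asynchronously
    response = await litellm.acompletion(
        model="ollama/llama2",
        messages=[{"role": "user", "content": "what is the weather"}],
        api_base="http://localhost:11434",
        stream=True,
    )
    async for chunk in response:
        print(chunk)

if __name__ == "__main__":
    asyncio.run(main())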