working with ollama & local model

Xi Yan 2024-09-19 08:52:33 -07:00
parent 9bdd4e3dd9
commit 46bf0192a1


@@ -90,7 +90,7 @@ async def run_main(host: str, port: int, stream: bool):
     cprint(f"User>{message.content}", "green")
     iterator = client.chat_completion(
         ChatCompletionRequest(
-            model="ollama-1",
+            model="Meta-Llama3.1-8B-Instruct",
             messages=[message],
             stream=stream,
         )
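For context, the changed call sits inside an async chat loop in the example script. The sketch below shows how the updated request might be driven end to end against a locally served model; the import paths, the InferenceClient constructor, the host/port values, and the chunk handling are assumptions (only the chat_completion(ChatCompletionRequest(...)) call and the "Meta-Llama3.1-8B-Instruct" model name come from the diff itself).

# Minimal sketch, not the full example script. Import paths, the client
# constructor, and the chunk handling are assumptions; only the
# chat_completion(ChatCompletionRequest(...)) call and the model name
# "Meta-Llama3.1-8B-Instruct" are taken from the diff above.
import asyncio

from termcolor import cprint
# Assumed import locations; adjust to the package layout of your checkout.
from llama_stack.apis.inference import ChatCompletionRequest, UserMessage
from llama_stack.apis.inference.client import InferenceClient


async def run_main(host: str, port: int, stream: bool = True):
    client = InferenceClient(f"http://{host}:{port}")  # assumed constructor

    message = UserMessage(role="user", content="Write a haiku about local models.")
    cprint(f"User>{message.content}", "green")

    # Same call as in the hunk above, now pointing at the locally served
    # Llama 3.1 8B Instruct model instead of the old "ollama-1" alias.
    iterator = client.chat_completion(
        ChatCompletionRequest(
            model="Meta-Llama3.1-8B-Instruct",
            messages=[message],
            stream=stream,
        )
    )
    async for chunk in iterator:
        # With stream=True the response arrives incrementally; the exact
        # chunk payload shape is an assumption, so print each piece as-is.
        print(chunk, flush=True)


if __name__ == "__main__":
    asyncio.run(run_main("localhost", 5000, stream=True))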