fix context retriever (#75)

Xi Yan 2024-09-18 08:24:36 -07:00
parent 18b3dbcacc
commit 251f6b7ddd


@@ -63,11 +63,9 @@ async def llm_rag_query_generator(
     model = config.model
     message = UserMessage(content=content)
     response = inference_api.chat_completion(
-        ChatCompletionRequest(
-            model=model,
-            messages=[message],
-            stream=False,
-        )
+        model=model,
+        messages=[message],
+        stream=False,
     )
     async for chunk in response:
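
The change drops the ChatCompletionRequest wrapper and passes the arguments to chat_completion directly as keyword arguments. As a minimal sketch, the call site reads roughly as follows after this commit; the surrounding names (config, content, inference_api, UserMessage) are assumed from the rest of llm_rag_query_generator and are not shown in this hunk:

    # After the fix: keyword arguments go straight to chat_completion,
    # no intermediate request object is constructed.
    model = config.model
    message = UserMessage(content=content)
    response = inference_api.chat_completion(
        model=model,
        messages=[message],
        stream=False,
    )
    async for chunk in response:
        ...  # consume the response chunks as before (unchanged by this commit)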