fix context retriever

Xi Yan 2024-09-18 08:13:44 -07:00
parent 055770a791
commit a2fac5d281

@@ -63,11 +63,9 @@ async def llm_rag_query_generator(
     model = config.model
     message = UserMessage(content=content)
     response = inference_api.chat_completion(
-        ChatCompletionRequest(
-            model=model,
-            messages=[message],
-            stream=False,
-        )
+        model=model,
+        messages=[message],
+        stream=False,
     )
     async for chunk in response:
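
For context, a minimal sketch of the call site after this change: chat_completion is now called with model, messages, and stream as keyword arguments instead of a wrapped ChatCompletionRequest object. The import path, the generate_rag_query name, and the chunk handling below are assumptions for illustration and are not taken from this diff.

# Minimal sketch, assuming llama-stack's UserMessage type; config, content,
# and inference_api stand in for the real arguments of the query generator.
from llama_stack.apis.inference import UserMessage  # assumed import path

async def generate_rag_query(config, content, inference_api):
    model = config.model
    message = UserMessage(content=content)
    # After this commit, chat_completion takes model/messages/stream directly,
    # not a single ChatCompletionRequest object.
    response = inference_api.chat_completion(
        model=model,
        messages=[message],
        stream=False,
    )
    rendered = ""
    async for chunk in response:
        # The chunk structure is not shown in this diff; str() is a placeholder.
        rendered += str(chunk)
    return rendered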