forked from phoenix-oss/llama-stack-mirror

commit 80ada04f76
parent 209cd3d35e

Remove request arg from chat completion response processing (#240)

Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>

7 changed files with 14 additions and 18 deletions
@@ -50,9 +50,7 @@ def text_from_choice(choice) -> str:
 
 
 def process_chat_completion_response(
-    request: ChatCompletionRequest,
-    response: OpenAICompatCompletionResponse,
-    formatter: ChatFormat,
+    response: OpenAICompatCompletionResponse, formatter: ChatFormat
 ) -> ChatCompletionResponse:
     choice = response.choices[0]
 
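A minimal sketch of how a call site changes under this commit: providers that previously threaded the request through now pass only the response and formatter. Here `get_backend_response` is a hypothetical stand-in for a provider's own OpenAI-compatible call, not a name from the repository.

# Hypothetical provider code illustrating the signature change;
# only the process_chat_completion_response call reflects this commit.
async def chat_completion(request, formatter):
    response = await get_backend_response(request)  # hypothetical helper
    # Old call (before this commit):
    #   process_chat_completion_response(request, response, formatter)
    # New call: the request argument is no longer needed.
    return process_chat_completion_response(response, formatter)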
@@ -78,9 +76,7 @@ def process_chat_completion_response(
 
 
 async def process_chat_completion_stream_response(
-    request: ChatCompletionRequest,
-    stream: AsyncGenerator[OpenAICompatCompletionResponse, None],
-    formatter: ChatFormat,
+    stream: AsyncGenerator[OpenAICompatCompletionResponse, None], formatter: ChatFormat
 ) -> AsyncGenerator:
     yield ChatCompletionResponseStreamChunk(
         event=ChatCompletionResponseEvent(
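The streaming path changes the same way. A sketch, with `stream_from_backend` as a hypothetical placeholder for the provider's streaming call:

# Hypothetical streaming caller; stream_from_backend() stands in for
# the provider's own OpenAI-compatible streaming request.
async def chat_completion_stream(request, formatter):
    stream = stream_from_backend(request)  # hypothetical helper
    # The request argument is no longer forwarded here either.
    async for chunk in process_chat_completion_stream_response(stream, formatter):
        yield chunk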