Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2025-10-03 19:57:35 +00:00
fix: don't pass default response format in Responses
# What does this PR do?

## Test Plan
This commit is contained in:
parent 6cce553c93
commit 0cc072dcaf

1 changed file with 4 additions and 1 deletion
@@ -127,13 +127,16 @@ class StreamingResponseOrchestrator:
         messages = self.ctx.messages.copy()

         while True:
+            # Text is the default response format for chat completion so don't need to pass it
+            # (some providers don't support non-empty response_format when tools are present)
+            response_format = None if self.ctx.response_format.type == "text" else self.ctx.response_format
             completion_result = await self.inference_api.openai_chat_completion(
                 model=self.ctx.model,
                 messages=messages,
                 tools=self.ctx.chat_tools,
                 stream=True,
                 temperature=self.ctx.temperature,
-                response_format=self.ctx.response_format,
+                response_format=response_format,
             )

             # Process streaming chunks and build complete response
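For clarity, here is a small self-contained sketch of the guard this change introduces. The `TextFormat`/`JsonSchemaFormat` classes and the `effective_response_format` helper are illustrative names invented for this example, not the actual llama-stack types; only the `type == "text"` check mirrors the diff above.

```python
from dataclasses import dataclass
from typing import Optional

# Illustrative stand-ins for OpenAI-style response format objects
# (hypothetical; the real types come from llama-stack's OpenAI compat layer).
@dataclass
class TextFormat:
    type: str = "text"

@dataclass
class JsonSchemaFormat:
    type: str = "json_schema"
    json_schema: Optional[dict] = None

def effective_response_format(response_format):
    # "text" is already the chat-completion default, so omit it entirely;
    # some providers reject a non-empty response_format when tools are present.
    return None if response_format.type == "text" else response_format

# The default text format is dropped, so nothing is forwarded to the provider.
assert effective_response_format(TextFormat()) is None

# A structured-output request is still passed through unchanged.
fmt = JsonSchemaFormat(json_schema={"name": "answer", "schema": {"type": "object"}})
assert effective_response_format(fmt) is fmt
```

Passing `None` means the chat-completion call behaves exactly as if no response format had been requested, which matches the provider-side default.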