mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-31 16:01:46 +00:00
fixes, update test to be more robust
This commit is contained in:
parent
70d3e4bd67
commit
cad646478f
2 changed files with 20 additions and 29 deletions
|
@ -263,12 +263,9 @@ class OpenAIResponsesImpl:
|
||||||
chat_response: OpenAIChatCompletion,
|
chat_response: OpenAIChatCompletion,
|
||||||
ctx: ChatCompletionContext,
|
ctx: ChatCompletionContext,
|
||||||
tools: list[OpenAIResponseInputTool] | None,
|
tools: list[OpenAIResponseInputTool] | None,
|
||||||
output_messages: list[OpenAIResponseOutput],
|
|
||||||
) -> list[OpenAIResponseOutput]:
|
) -> list[OpenAIResponseOutput]:
|
||||||
"""
|
"""Handle tool execution and response message creation."""
|
||||||
Handle tool execution and response message creation.
|
output_messages: list[OpenAIResponseOutput] = []
|
||||||
Returns: updated output_messages list
|
|
||||||
"""
|
|
||||||
# Execute tool calls if any
|
# Execute tool calls if any
|
||||||
for choice in chat_response.choices:
|
for choice in chat_response.choices:
|
||||||
if choice.message.tool_calls and tools:
|
if choice.message.tool_calls and tools:
|
||||||
|
@ -362,6 +359,8 @@ class OpenAIResponsesImpl:
|
||||||
temperature=temperature,
|
temperature=temperature,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
print(f"chat_tools: {chat_tools}")
|
||||||
|
print(f"messages: {messages}")
|
||||||
inference_result = await self.inference_api.openai_chat_completion(
|
inference_result = await self.inference_api.openai_chat_completion(
|
||||||
model=model,
|
model=model,
|
||||||
messages=messages,
|
messages=messages,
|
||||||
|
@ -404,11 +403,12 @@ class OpenAIResponsesImpl:
|
||||||
chat_response = OpenAIChatCompletion(**inference_result.model_dump())
|
chat_response = OpenAIChatCompletion(**inference_result.model_dump())
|
||||||
|
|
||||||
# Process response choices (tool execution and message creation)
|
# Process response choices (tool execution and message creation)
|
||||||
output_messages = await self._process_response_choices(
|
output_messages.extend(
|
||||||
chat_response=chat_response,
|
await self._process_response_choices(
|
||||||
ctx=ctx,
|
chat_response=chat_response,
|
||||||
tools=tools,
|
ctx=ctx,
|
||||||
output_messages=output_messages,
|
tools=tools,
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
response = OpenAIResponseObject(
|
response = OpenAIResponseObject(
|
||||||
|
@ -525,11 +525,12 @@ class OpenAIResponsesImpl:
|
||||||
)
|
)
|
||||||
|
|
||||||
# Process response choices (tool execution and message creation)
|
# Process response choices (tool execution and message creation)
|
||||||
output_messages = await self._process_response_choices(
|
output_messages.extend(
|
||||||
chat_response=chat_response_obj,
|
await self._process_response_choices(
|
||||||
ctx=ctx,
|
chat_response=chat_response_obj,
|
||||||
tools=tools,
|
ctx=ctx,
|
||||||
output_messages=output_messages,
|
tools=tools,
|
||||||
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create final response
|
# Create final response
|
||||||
|
@ -589,15 +590,6 @@ class OpenAIResponsesImpl:
|
||||||
chat_tools.append(ChatCompletionToolParam(type="function", function=input_tool.model_dump()))
|
chat_tools.append(ChatCompletionToolParam(type="function", function=input_tool.model_dump()))
|
||||||
elif input_tool.type == "web_search":
|
elif input_tool.type == "web_search":
|
||||||
tool_name = "web_search"
|
tool_name = "web_search"
|
||||||
|
|
||||||
# we need to list all the toolgroups so tools can be found. avoid MCPs because they
|
|
||||||
# may need authentication.
|
|
||||||
groups = await self.tool_groups_api.list_tool_groups()
|
|
||||||
for group in groups.data:
|
|
||||||
if group.mcp_endpoint:
|
|
||||||
continue
|
|
||||||
_ = await self.tool_groups_api.list_tools(group.identifier)
|
|
||||||
|
|
||||||
tool = await self.tool_groups_api.get_tool(tool_name)
|
tool = await self.tool_groups_api.get_tool(tool_name)
|
||||||
if not tool:
|
if not tool:
|
||||||
raise ValueError(f"Tool {tool_name} not found")
|
raise ValueError(f"Tool {tool_name} not found")
|
||||||
|
|
|
@ -77,11 +77,12 @@ test_response_image:
|
||||||
image_url: "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg"
|
image_url: "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg"
|
||||||
output: "llama"
|
output: "llama"
|
||||||
|
|
||||||
|
# the models are really poor at tool calling after seeing images :/
|
||||||
test_response_multi_turn_image:
|
test_response_multi_turn_image:
|
||||||
test_name: test_response_multi_turn_image
|
test_name: test_response_multi_turn_image
|
||||||
test_params:
|
test_params:
|
||||||
case:
|
case:
|
||||||
- case_id: "llama_image_search"
|
- case_id: "llama_image_understanding"
|
||||||
turns:
|
turns:
|
||||||
- input:
|
- input:
|
||||||
- role: user
|
- role: user
|
||||||
|
@ -91,7 +92,5 @@ test_response_multi_turn_image:
|
||||||
- type: input_image
|
- type: input_image
|
||||||
image_url: "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg"
|
image_url: "https://upload.wikimedia.org/wikipedia/commons/f/f7/Llamas%2C_Vernagt-Stausee%2C_Italy.jpg"
|
||||||
output: "llama"
|
output: "llama"
|
||||||
- input: "Search the web using the search tool for the animal from the previous response. Your search query should be a single phrase that includes the animal's name and the words 'maverick', 'scout' and 'llm'"
|
- input: "What country do you find this animal primarily in? What continent?"
|
||||||
tools:
|
output: "peru"
|
||||||
- type: web_search
|
|
||||||
output: "model"
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue