Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-11 19:56:03 +00:00)
refactor: remove ResponseGuardrail cast by fixing implementation signature
The implementation already handles both str and ResponseGuardrailSpec via extract_guardrail_ids(), so update the signature to match the API.
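For context, the point of the description is that the guardrail normalization already branches on both input forms, so the router-side cast was dead weight. Below is a minimal sketch of such a helper, assuming a simple shape for ResponseGuardrailSpec (the real class lives in llama_stack.apis.agents.agents and may carry more fields; extract_guardrail_ids here mirrors only the behavior the commit message describes, not the actual implementation):

from dataclasses import dataclass


@dataclass
class ResponseGuardrailSpec:
    # Stand-in for the API type; only an identifier field is assumed here.
    type: str


def extract_guardrail_ids(guardrails: list[str | ResponseGuardrailSpec] | None) -> list[str]:
    """Normalize mixed guardrail inputs into a flat list of ID strings.

    Bare strings pass through unchanged; structured specs contribute their
    identifier. Because both forms are handled here, the implementation
    signature can accept list[str | ResponseGuardrailSpec] directly and no
    typing_cast is needed at the call site.
    """
    if not guardrails:
        return []
    return [g if isinstance(g, str) else g.type for g in guardrails]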
This commit is contained in:
parent 804d9420c9
commit f4012d7fde

2 changed files with 3 additions and 7 deletions
@@ -349,10 +349,6 @@ class MetaReferenceAgentsImpl(Agents):
         guardrails: list[ResponseGuardrail] | None = None,
     ) -> OpenAIResponseObject:
         assert self.openai_responses_impl is not None, "OpenAI responses not initialized"
-        from llama_stack.apis.agents.agents import ResponseGuardrailSpec
-        from typing import cast as typing_cast
-        # Cast guardrails to the more specific type expected by the implementation
-        guardrails_spec = typing_cast(list[ResponseGuardrailSpec] | None, guardrails)
         result = await self.openai_responses_impl.create_openai_response(
             input,
             model,
@@ -367,9 +363,9 @@ class MetaReferenceAgentsImpl(Agents):
             tools,
             include,
             max_infer_iters,
-            guardrails_spec,
+            guardrails,
         )
-        return typing_cast(OpenAIResponseObject, result)
+        return result  # type: ignore[no-any-return]

     async def list_openai_responses(
         self,
@@ -254,7 +254,7 @@ class OpenAIResponsesImpl:
         tools: list[OpenAIResponseInputTool] | None = None,
         include: list[str] | None = None,
         max_infer_iters: int | None = 10,
-        guardrails: list[ResponseGuardrailSpec] | None = None,
+        guardrails: list[str | ResponseGuardrailSpec] | None = None,
     ):
         stream = bool(stream)
         text = OpenAIResponseText(format=OpenAIResponseTextFormat(type="text")) if text is None else text
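With the two signatures aligned, both guardrail forms flow straight through without a cast. A hypothetical call sketch follows (the instance wiring, model ID, and the spec's type field are assumptions, not taken from this commit; the parameter names input, model, and guardrails come from the diff above):

from llama_stack.apis.agents.agents import ResponseGuardrailSpec


async def demo(agents_impl) -> None:
    # agents_impl stands in for a configured MetaReferenceAgentsImpl.
    # Bare guardrail ID string:
    await agents_impl.create_openai_response(
        input="Summarize this document.",
        model="llama-3.1-8b-instruct",
        guardrails=["llama-guard"],
    )
    # Structured spec form, accepted by the same parameter:
    await agents_impl.create_openai_response(
        input="Summarize this document.",
        model="llama-3.1-8b-instruct",
        guardrails=[ResponseGuardrailSpec(type="llama-guard")],
    )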