Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-14 09:52:47 +00:00)
updated sync logic and fixed tests
Signed-off-by: Francisco Javier Arceo <farceo@redhat.com>
commit b378282cc0 (parent 2ba7b186f1)
2 changed files with 12 additions and 10 deletions
@@ -234,6 +234,7 @@ class OpenAIResponsesImpl:
                 "Mutually exclusive parameters: 'previous_response_id' and 'conversation'. Ensure you are only providing one of these parameters."
             )
 
+        original_input = input  # needed for syncing to Conversations
         if conversation is not None:
             if not conversation.startswith("conv_"):
                 raise InvalidConversationIdError(conversation)
@@ -243,6 +244,7 @@ class OpenAIResponsesImpl:
 
         stream_gen = self._create_streaming_response(
             input=input,
+            original_input=original_input,
             model=model,
             instructions=instructions,
             previous_response_id=previous_response_id,
@@ -289,6 +291,7 @@ class OpenAIResponsesImpl:
         self,
         input: str | list[OpenAIResponseInput],
         model: str,
+        original_input: str | list[OpenAIResponseInput] | None = None,
         instructions: str | None = None,
         previous_response_id: str | None = None,
         conversation: str | None = None,
@@ -350,7 +353,9 @@ class OpenAIResponsesImpl:
         )
 
         if conversation and final_response:
-            await self._sync_response_to_conversation(conversation, all_input, final_response)
+            # for Conversations, we need to use the original_input if it's available, otherwise use input
+            sync_input = original_input if original_input is not None else input
+            await self._sync_response_to_conversation(conversation, sync_input, final_response)
 
     async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
         return await self.responses_store.delete_response_object(response_id)
@@ -431,8 +436,4 @@ class OpenAIResponsesImpl:
         if conversation_items:
             adapter = TypeAdapter(list[ConversationItem])
             validated_items = adapter.validate_python(conversation_items)
-            try:
-                await self.conversations_api.add_items(conversation_id, validated_items)
-            except Exception as e:
-                logger.error(f"Failed to sync response {response.id} to conversation {conversation_id}: {e}")
-                # don't fail response creation if conversation sync fails
+            await self.conversations_api.add_items(conversation_id, validated_items)
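The hunks above capture the caller's input as original_input before the request is processed, because input can later be expanded with prior history and would then no longer match what should be written back to the Conversation. Below is a minimal runnable sketch of that pattern only; the helper methods (_expand_with_history, _run_model, and the stubbed _sync_response_to_conversation) are hypothetical stand-ins, not the actual llama-stack implementation.

# Sketch of the original_input pattern; helpers are hypothetical stand-ins.
class ResponsesSketch:
    async def _expand_with_history(self, previous_response_id, input):
        # hypothetical: prepend prior turns fetched for previous_response_id
        return [{"role": "user", "content": str(input)}]

    async def _run_model(self, input):
        # hypothetical: call the model and return a response object
        return {"id": "resp_1", "output": []}

    async def _sync_response_to_conversation(self, conversation, sync_input, response):
        # hypothetical: append sync_input and the response output to the conversation
        pass

    async def create_response(self, input, conversation=None, previous_response_id=None):
        original_input = input  # preserve the caller's input for conversation syncing

        if previous_response_id is not None:
            # input gets expanded with prior history, so it no longer matches
            # what the caller actually sent in this request
            input = await self._expand_with_history(previous_response_id, input)

        final_response = await self._run_model(input)

        if conversation and final_response:
            # sync only the caller's original input, not the expanded history
            sync_input = original_input if original_input is not None else input
            await self._sync_response_to_conversation(conversation, sync_input, final_response)
        return final_response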
@@ -223,10 +223,11 @@ class TestMessageSyncing:
             id="resp_123", created_at=1234567890, model="test-model", object="response", output=[], status="completed"
         )
 
-        result = await responses_impl_with_conversations._sync_response_to_conversation(
-            "conv_test123", "Hello", mock_response
-        )
-        assert result is None
+        # matching the behavior of OpenAI here
+        with pytest.raises(Exception, match="API Error"):
+            await responses_impl_with_conversations._sync_response_to_conversation(
+                "conv_test123", "Hello", mock_response
+            )
 
     async def test_sync_unsupported_types(self, responses_impl_with_conversations):
         mock_response = OpenAIResponseObject(
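The updated test expects a failure in conversations_api.add_items to propagate to the caller (matching OpenAI's behavior) now that the try/except has been removed, instead of the old result-is-None check. A hedged sketch of how such a test could be arranged is below; it assumes the responses_impl_with_conversations fixture exposes a mocked conversations_api, and that OpenAIResponseObject and the async test setup come from the existing test module, so names and wiring here are illustrative rather than the repo's exact code.

# Illustrative only: assumes a mocked conversations_api on the fixture and the
# imports/async test configuration of the existing test module.
from unittest.mock import AsyncMock

import pytest


async def test_sync_error_propagates(responses_impl_with_conversations):
    # make the conversation sync call fail
    responses_impl_with_conversations.conversations_api.add_items = AsyncMock(
        side_effect=Exception("API Error")
    )
    mock_response = OpenAIResponseObject(
        id="resp_123", created_at=1234567890, model="test-model", object="response", output=[], status="completed"
    )
    # with the try/except removed, the failure surfaces to the caller
    with pytest.raises(Exception, match="API Error"):
        await responses_impl_with_conversations._sync_response_to_conversation(
            "conv_test123", "Hello", mock_response
        )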