complete transform litellm to responses api

Ishaan Jaff 2025-04-17 21:44:45 -07:00
parent f6080fcc0b
commit 4e2b04a1e0
3 changed files with 83 additions and 3 deletions

View file

@@ -9,11 +9,13 @@ from litellm.types.llms.openai import (
     ChatCompletionSystemMessage,
     ChatCompletionUserMessage,
     GenericChatCompletionMessage,
+    ResponseAPIUsage,
     ResponseInputParam,
     ResponsesAPIOptionalRequestParams,
     ResponsesAPIResponse,
 )
-from litellm.types.utils import ModelResponse
+from litellm.types.responses.main import GenericResponseOutputItem, OutputText
+from litellm.types.utils import Choices, Message, ModelResponse, Usage
 
 
 class LiteLLMCompletionResponsesConfig:
@@ -147,7 +149,10 @@ class LiteLLMCompletionResponsesConfig:
             ),
             instructions=getattr(chat_completion_response, "instructions", None),
             metadata=getattr(chat_completion_response, "metadata", None),
-            output=getattr(chat_completion_response, "output", []),
+            output=LiteLLMCompletionResponsesConfig._transform_chat_completion_choices_to_responses_output(
+                chat_completion_response=chat_completion_response,
+                choices=getattr(chat_completion_response, "choices", []),
+            ),
             parallel_tool_calls=getattr(
                 chat_completion_response, "parallel_tool_calls", False
             ),
@@ -168,3 +173,43 @@ class LiteLLMCompletionResponsesConfig:
             usage=getattr(chat_completion_response, "usage", None),
             user=getattr(chat_completion_response, "user", None),
         )
+
+    @staticmethod
+    def _transform_chat_completion_choices_to_responses_output(
+        chat_completion_response: ModelResponse,
+        choices: List[Choices],
+    ) -> List[GenericResponseOutputItem]:
+        responses_output: List[GenericResponseOutputItem] = []
+        for choice in choices:
+            responses_output.append(
+                GenericResponseOutputItem(
+                    id=chat_completion_response.id,
+                    status=choice.finish_reason,
+                    role=choice.message.role,
+                    content=[
+                        LiteLLMCompletionResponsesConfig._transform_chat_message_to_response_output_text(
+                            choice.message
+                        )
+                    ],
+                )
+            )
+        return responses_output
+
+    @staticmethod
+    def _transform_chat_message_to_response_output_text(
+        message: Message,
+    ) -> OutputText:
+        return OutputText(
+            type="text",
+            text=message.content,
+        )
+
+    @staticmethod
+    def _transform_chat_completion_usage_to_responses_usage(
+        usage: Usage,
+    ) -> ResponseAPIUsage:
+        return ResponseAPIUsage(
+            input_tokens=usage.prompt_tokens,
+            output_tokens=usage.completion_tokens,
+            total_tokens=usage.total_tokens,
+        )

View file

@@ -52,6 +52,8 @@ from openai.types.responses.response_create_params import (
 from pydantic import BaseModel, Discriminator, Field, PrivateAttr
 from typing_extensions import Annotated, Dict, Required, TypedDict, override
 
+from litellm.types.responses.main import GenericResponseOutputItem
+
 FileContent = Union[IO[bytes], bytes, PathLike]
 
 FileTypes = Union[
@@ -963,7 +965,7 @@ class ResponsesAPIResponse(BaseLiteLLMOpenAIResponseObject):
     metadata: Optional[Dict]
     model: Optional[str]
     object: Optional[str]
-    output: List[ResponseOutputItem]
+    output: Union[List[ResponseOutputItem], List[GenericResponseOutputItem]]
     parallel_tool_calls: bool
     temperature: Optional[float]
     tool_choice: ToolChoice
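One consequence of widening the output annotation: downstream code can now encounter either OpenAI pydantic ResponseOutputItem models or plain GenericResponseOutputItem dicts in the same field. A defensive sketch of handling both shapes (the helper name is hypothetical, not part of this commit):

    from typing import Any, List

    def collect_output_text(output: List[Any]) -> str:
        """Join text blocks from pydantic ResponseOutputItem models or
        GenericResponseOutputItem dicts (hypothetical helper)."""
        parts: List[str] = []
        for item in output:
            # TypedDict items are plain dicts at runtime; pydantic models
            # expose the same fields as attributes instead.
            content = item.get("content", []) if isinstance(item, dict) else getattr(item, "content", [])
            for block in content or []:
                text = block.get("text") if isinstance(block, dict) else getattr(block, "text", None)
                if text:
                    parts.append(text)
        return "".join(parts)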

View file

@@ -0,0 +1,33 @@
+from typing_extensions import Any, List, Optional, TypedDict
+
+
+class GenericResponseOutputItemContentAnnotation(TypedDict, total=False):
+    """Annotation for content in a message"""
+
+    type: Optional[str]
+    start_index: Optional[int]
+    end_index: Optional[int]
+    url: Optional[str]
+    title: Optional[str]
+    pass
+
+
+class OutputText(TypedDict, total=False):
+    """Text output content from an assistant message"""
+
+    type: Optional[str]  # "output_text"
+    text: Optional[str]
+    annotations: Optional[List[GenericResponseOutputItemContentAnnotation]]
+
+
+class GenericResponseOutputItem(TypedDict, total=False):
+    """
+    Generic response API output item
+    """
+
+    type: str  # "message"
+    id: str
+    status: str  # "completed", "in_progress", etc.
+    role: str  # "assistant", "user", etc.
+    content: List[OutputText]
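Since these are TypedDicts declared with total=False, every key is optional at construction time and instances are ordinary dicts at runtime. A small sketch of building an item by hand (values are illustrative):

    from litellm.types.responses.main import GenericResponseOutputItem, OutputText

    text_block = OutputText(type="output_text", text="Hello!")
    item = GenericResponseOutputItem(
        type="message",
        id="chatcmpl-123",  # hypothetical id
        status="completed",
        role="assistant",
        content=[text_block],
    )
    assert item["content"][0]["text"] == "Hello!"  # plain dict access

Worth flagging: the comment here suggests type should be "output_text", while the transform added in this commit emits type="text"; the two hunks disagree, so consumers should accept both until that is reconciled.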