(feat) add Predicted Outputs for OpenAI (#6594)

* bump openai to openai==1.54.0

* add 'prediction' param

* testing fix bedrock deprecated cohere.command-text-v14

* test test_openai_prediction_param.py

* test_openai_prediction_param_with_caching

* doc Predicted Outputs

* doc Predicted Output
Ishaan Jaff 2024-11-05 10:46:57 +05:30 committed by GitHub
parent 57b1bb5e06
commit c047d51cc8
12 changed files with 362 additions and 13 deletions

@@ -47,7 +47,7 @@ jobs:
pip install opentelemetry-api==1.25.0
pip install opentelemetry-sdk==1.25.0
pip install opentelemetry-exporter-otlp==1.25.0
pip install openai==1.52.0
pip install openai==1.54.0
pip install prisma==0.11.0
pip install "detect_secrets==1.5.0"
pip install "httpx==0.24.1"
@@ -520,7 +520,7 @@ jobs:
pip install "aiodynamo==23.10.1"
pip install "asyncio==3.4.3"
pip install "PyGithub==1.59.1"
pip install "openai==1.52.0"
pip install "openai==1.54.0 "
# Run pytest and generate JUnit XML report
- run:
name: Build Docker image
@@ -637,7 +637,7 @@ jobs:
pip install "aiodynamo==23.10.1"
pip install "asyncio==3.4.3"
pip install "PyGithub==1.59.1"
pip install "openai==1.52.0"
pip install "openai==1.54.0 "
- run:
name: Build Docker image
command: docker build -t my-app:latest -f ./docker/Dockerfile.database .
@@ -729,7 +729,7 @@ jobs:
pip install "pytest-asyncio==0.21.1"
pip install "google-cloud-aiplatform==1.43.0"
pip install aiohttp
pip install "openai==1.52.0"
pip install "openai==1.54.0 "
python -m pip install --upgrade pip
pip install "pydantic==2.7.1"
pip install "pytest==7.3.1"
@@ -924,7 +924,7 @@ jobs:
pip install "pytest-retry==1.6.3"
pip install "pytest-asyncio==0.21.1"
pip install aiohttp
pip install "openai==1.52.0"
pip install "openai==1.54.0 "
python -m pip install --upgrade pip
pip install "pydantic==2.7.1"
pip install "pytest==7.3.1"

@@ -1,5 +1,5 @@
# used by CI/CD testing
openai==1.52.0
openai==1.54.0
python-dotenv
tiktoken
importlib_metadata

@@ -0,0 +1,109 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# Predicted Outputs
| Property | Details |
|-------|-------|
| Description | Use this when most of the output of the LLM is known ahead of time. For instance, if you are asking the model to rewrite some text or code with only minor changes, you can reduce your latency significantly by using Predicted Outputs, passing in the existing content as your prediction. |
| Supported providers | `openai` |
| Link to OpenAI doc on Predicted Outputs | [Predicted Outputs ↗](https://platform.openai.com/docs/guides/latency-optimization#use-predicted-outputs) |
| Supported from LiteLLM Version | `v1.51.4` |
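
The prediction itself is passed as plain content. A minimal sketch of the payload shape used throughout the examples below:

```python
# The "content" field holds the text you expect the model to largely reproduce,
# e.g. the file or snippet being rewritten with only minor changes.
prediction = {"type": "content", "content": "<existing text or code>"}
```
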
## Using Predicted Outputs
<Tabs>
<TabItem label="LiteLLM Python SDK" value="Python">
In this example, we want to refactor a piece of C# code, replacing the Username property with an Email property:
```python
import os

import litellm
os.environ["OPENAI_API_KEY"] = "your-api-key"
code = """
/// <summary>
/// Represents a user with a first name, last name, and username.
/// </summary>
public class User
{
/// <summary>
/// Gets or sets the user's first name.
/// </summary>
public string FirstName { get; set; }
/// <summary>
/// Gets or sets the user's last name.
/// </summary>
public string LastName { get; set; }
/// <summary>
/// Gets or sets the user's username.
/// </summary>
public string Username { get; set; }
}
"""
completion = litellm.completion(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.",
},
{"role": "user", "content": code},
],
prediction={"type": "content", "content": code},
)
print(completion)
```
</TabItem>
<TabItem label="LiteLLM Proxy Server" value="proxy">
1. Define models in your config.yaml
```yaml
model_list:
- model_name: gpt-4o-mini # OpenAI gpt-4o-mini
litellm_params:
model: openai/gpt-4o-mini
api_key: os.environ/OPENAI_API_KEY
```
2. Run proxy server
```bash
litellm --config config.yaml
```
3. Test it using the OpenAI Python SDK
```python
from openai import OpenAI
client = OpenAI(
api_key="LITELLM_PROXY_KEY", # sk-1234
base_url="LITELLM_PROXY_BASE" # http://0.0.0.0:4000
)
# The C# snippet we want rewritten (same idea as the SDK example above)
code = """
public class User
{
    public string FirstName { get; set; }
    public string LastName { get; set; }
    public string Username { get; set; }
}
"""

completion = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.",
},
{"role": "user", "content": code},
],
prediction={"type": "content", "content": code},
)
print(completion)
```
</TabItem>
</Tabs>
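
## Checking prediction token usage

The response `usage` reports how much of the prediction the model accepted or rejected, which is useful for verifying that Predicted Outputs actually reduced work. A small sketch, reusing the `completion` object from above (these are the same fields the new unit tests assert on):

```python
details = completion.usage.completion_tokens_details
print(details.accepted_prediction_tokens)  # prediction tokens reused in the output
print(details.rejected_prediction_tokens)  # prediction tokens the model discarded
```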

@@ -205,6 +205,7 @@ const sidebars = {
"completion/prompt_caching",
"completion/audio",
"completion/vision",
"completion/predict_outputs",
"completion/prefix",
"completion/drop_params",
"completion/prompt_formatting",

@@ -94,6 +94,7 @@ class OpenAIGPTConfig:
"max_tokens",
"max_completion_tokens",
"modalities",
"prediction",
"n",
"presence_penalty",
"seed",

@@ -162,6 +162,7 @@ from .types.llms.openai import (
ChatCompletionAssistantMessage,
ChatCompletionAudioParam,
ChatCompletionModality,
ChatCompletionPredictionContentParam,
ChatCompletionUserMessage,
HttpxBinaryResponseContent,
)
@@ -304,6 +305,7 @@ async def acompletion(
max_tokens: Optional[int] = None,
max_completion_tokens: Optional[int] = None,
modalities: Optional[List[ChatCompletionModality]] = None,
prediction: Optional[ChatCompletionPredictionContentParam] = None,
audio: Optional[ChatCompletionAudioParam] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
@@ -346,6 +348,7 @@ async def acompletion(
max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity).
max_completion_tokens (integer, optional): An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
modalities (List[ChatCompletionModality], optional): Output types that you would like the model to generate for this request. You can use `["text", "audio"]`
prediction (ChatCompletionPredictionContentParam, optional): Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content.
audio (ChatCompletionAudioParam, optional): Parameters for audio output. Required when audio output is requested with modalities: ["audio"]
presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far.
frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far.
@@ -387,6 +390,7 @@ async def acompletion(
"max_tokens": max_tokens,
"max_completion_tokens": max_completion_tokens,
"modalities": modalities,
"prediction": prediction,
"audio": audio,
"presence_penalty": presence_penalty,
"frequency_penalty": frequency_penalty,
@@ -693,6 +697,7 @@ def completion( # type: ignore # noqa: PLR0915
max_completion_tokens: Optional[int] = None,
max_tokens: Optional[int] = None,
modalities: Optional[List[ChatCompletionModality]] = None,
prediction: Optional[ChatCompletionPredictionContentParam] = None,
audio: Optional[ChatCompletionAudioParam] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
@@ -737,6 +742,7 @@ def completion( # type: ignore # noqa: PLR0915
max_tokens (integer, optional): The maximum number of tokens in the generated completion (default is infinity).
max_completion_tokens (integer, optional): An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
modalities (List[ChatCompletionModality], optional): Output types that you would like the model to generate for this request. You can use `["text", "audio"]`
prediction (ChatCompletionPredictionContentParam, optional): Configuration for a Predicted Output, which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content.
audio (ChatCompletionAudioParam, optional): Parameters for audio output. Required when audio output is requested with modalities: ["audio"]
presence_penalty (float, optional): It is used to penalize new tokens based on their existence in the text so far.
frequency_penalty: It is used to penalize new tokens based on their frequency in the text so far.
@@ -843,6 +849,7 @@ def completion( # type: ignore # noqa: PLR0915
"stop",
"max_completion_tokens",
"modalities",
"prediction",
"audio",
"max_tokens",
"presence_penalty",
@@ -994,6 +1001,7 @@ def completion( # type: ignore # noqa: PLR0915
max_tokens=max_tokens,
max_completion_tokens=max_completion_tokens,
modalities=modalities,
prediction=prediction,
audio=audio,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
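
The parameter is threaded through both `completion()` and `acompletion()`, so an async call mirrors the sync example in the docs. A minimal sketch (assuming `OPENAI_API_KEY` is set; the short `code` string is a placeholder for the real file being rewritten):

```python
import asyncio
import os

import litellm

os.environ["OPENAI_API_KEY"] = "your-api-key"  # placeholder

# Text we expect the model to largely reproduce
code = "public class User { public string Username { get; set; } }"


async def main() -> None:
    response = await litellm.acompletion(
        model="gpt-4o-mini",
        messages=[
            {"role": "user", "content": "Rename the Username property to Email. Respond only with code."},
            {"role": "user", "content": code},
        ],
        prediction={"type": "content", "content": code},
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```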

@@ -21,6 +21,9 @@ from openai.types.beta.threads.run import Run
from openai.types.chat import ChatCompletionChunk
from openai.types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from openai.types.chat.chat_completion_modality import ChatCompletionModality
from openai.types.chat.chat_completion_prediction_content_param import (
ChatCompletionPredictionContentParam,
)
from openai.types.embedding import Embedding as OpenAIEmbedding
from pydantic import BaseModel, Field
from typing_extensions import Dict, Required, TypedDict, override
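
Since `litellm/main.py` re-imports this type from `.types.llms.openai` (see the hunk above), the payload can also be annotated with the OpenAI TypedDict. A sketch, assuming `litellm.types.llms.openai` exposes the type as shown:

```python
from litellm.types.llms.openai import ChatCompletionPredictionContentParam

# ChatCompletionPredictionContentParam is a TypedDict from the openai package,
# so at runtime this is still a plain dict.
prediction: ChatCompletionPredictionContentParam = {
    "type": "content",
    "content": "public class User { public string Email { get; set; } }",
}
```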

@@ -2550,6 +2550,7 @@ def get_optional_params( # noqa: PLR0915
max_tokens=None,
max_completion_tokens=None,
modalities=None,
prediction=None,
audio=None,
presence_penalty=None,
frequency_penalty=None,
@@ -2631,6 +2632,7 @@ def get_optional_params( # noqa: PLR0915
"max_tokens": None,
"max_completion_tokens": None,
"modalities": None,
"prediction": None,
"audio": None,
"presence_penalty": None,
"frequency_penalty": None,

poetry.lock (generated)

@@ -1823,13 +1823,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "openai"
version = "1.52.0"
version = "1.54.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.7.1"
python-versions = ">=3.8"
files = [
{file = "openai-1.52.0-py3-none-any.whl", hash = "sha256:0c249f20920183b0a2ca4f7dba7b0452df3ecd0fa7985eb1d91ad884bc3ced9c"},
{file = "openai-1.52.0.tar.gz", hash = "sha256:95c65a5f77559641ab8f3e4c3a050804f7b51d278870e2ec1f7444080bfe565a"},
{file = "openai-1.54.0-py3-none-any.whl", hash = "sha256:24ed8874b56e919f0fbb80b7136c3fb022dc82ce9f5f21579b7b280ea4bba249"},
{file = "openai-1.54.0.tar.gz", hash = "sha256:df2a84384314165b706722a7ac8988dc33eba20dd7fc3b939d138110e608b1ce"},
]
[package.dependencies]
@@ -3519,4 +3519,4 @@ proxy = ["PyJWT", "apscheduler", "backoff", "cryptography", "fastapi", "fastapi-
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0, !=3.9.7"
content-hash = "491d361cabc637f8f896091b92855040da670bb7b311dcbfe75ad20eab97400c"
content-hash = "64154f16e1bbea8b77ba3eddf1cbf051af39f019820d92b638c448445fa32c83"

@@ -17,7 +17,7 @@ documentation = "https://docs.litellm.ai"
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0, !=3.9.7"
openai = ">=1.52.0"
openai = ">=1.54.0"
python-dotenv = ">=0.2.0"
tiktoken = ">=0.7.0"
importlib-metadata = ">=6.8.0"

@@ -1,6 +1,6 @@
# LITELLM PROXY DEPENDENCIES #
anyio==4.4.0 # openai + http req.
openai==1.52.0 # openai req.
openai==1.54.0 # openai req.
fastapi==0.111.0 # server dep
backoff==2.2.1 # server dep
pyyaml==6.0.0 # server dep

@@ -0,0 +1,225 @@
import json
import os
import sys
from datetime import datetime
from unittest.mock import AsyncMock
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import httpx
import pytest
from respx import MockRouter
import litellm
from litellm import Choices, Message, ModelResponse
def test_openai_prediction_param():
litellm.set_verbose = True
code = """
/// <summary>
/// Represents a user with a first name, last name, and username.
/// </summary>
public class User
{
/// <summary>
/// Gets or sets the user's first name.
/// </summary>
public string FirstName { get; set; }
/// <summary>
/// Gets or sets the user's last name.
/// </summary>
public string LastName { get; set; }
/// <summary>
/// Gets or sets the user's username.
/// </summary>
public string Username { get; set; }
}
"""
completion = litellm.completion(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.",
},
{"role": "user", "content": code},
],
prediction={"type": "content", "content": code},
)
print(completion)
assert (
completion.usage.completion_tokens_details.accepted_prediction_tokens > 0
or completion.usage.completion_tokens_details.rejected_prediction_tokens > 0
)
@pytest.mark.asyncio
@pytest.mark.respx
async def test_openai_prediction_param_mock(respx_mock: MockRouter):
"""
Tests that prediction parameter is correctly passed to the API
"""
litellm.set_verbose = True
code = """
/// <summary>
/// Represents a user with a first name, last name, and username.
/// </summary>
public class User
{
/// <summary>
/// Gets or sets the user's first name.
/// </summary>
public string FirstName { get; set; }
/// <summary>
/// Gets or sets the user's last name.
/// </summary>
public string LastName { get; set; }
/// <summary>
/// Gets or sets the user's username.
/// </summary>
public string Username { get; set; }
}
"""
mock_response = ModelResponse(
id="chatcmpl-AQ5RmV8GvVSRxEcDxnuXlQnsibiY9",
choices=[
Choices(
message=Message(
content=code.replace("Username", "Email").replace(
"username", "email"
),
role="assistant",
)
)
],
created=int(datetime.now().timestamp()),
model="gpt-4o-mini-2024-07-18",
usage={
"completion_tokens": 207,
"prompt_tokens": 175,
"total_tokens": 382,
"completion_tokens_details": {
"accepted_prediction_tokens": 0,
"reasoning_tokens": 0,
"rejected_prediction_tokens": 80,
},
},
)
mock_request = respx_mock.post("https://api.openai.com/v1/chat/completions").mock(
return_value=httpx.Response(200, json=mock_response.dict())
)
completion = await litellm.acompletion(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.",
},
{"role": "user", "content": code},
],
prediction={"type": "content", "content": code},
)
assert mock_request.called
request_body = json.loads(mock_request.calls[0].request.content)
# Verify the request contains the prediction parameter
assert "prediction" in request_body
# verify prediction is correctly sent to the API
assert request_body["prediction"] == {"type": "content", "content": code}
# Verify the completion tokens details
assert completion.usage.completion_tokens_details.accepted_prediction_tokens == 0
assert completion.usage.completion_tokens_details.rejected_prediction_tokens == 80
@pytest.mark.asyncio
async def test_openai_prediction_param_with_caching():
"""
Tests using `prediction` parameter with caching
"""
from litellm.caching.caching import LiteLLMCacheType
import logging
from litellm._logging import verbose_logger
verbose_logger.setLevel(logging.DEBUG)
import time
litellm.set_verbose = True
litellm.cache = litellm.Cache(type=LiteLLMCacheType.LOCAL)
code = """
/// <summary>
/// Represents a user with a first name, last name, and username.
/// </summary>
public class User
{
/// <summary>
/// Gets or sets the user's first name.
/// </summary>
public string FirstName { get; set; }
/// <summary>
/// Gets or sets the user's last name.
/// </summary>
public string LastName { get; set; }
/// <summary>
/// Gets or sets the user's username.
/// </summary>
public string Username { get; set; }
}
"""
completion_response_1 = litellm.completion(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.",
},
{"role": "user", "content": code},
],
prediction={"type": "content", "content": code},
)
time.sleep(0.5)
# cache hit
completion_response_2 = litellm.completion(
model="gpt-4o-mini",
messages=[
{
"role": "user",
"content": "Replace the Username property with an Email property. Respond only with code, and with no markdown formatting.",
},
{"role": "user", "content": code},
],
prediction={"type": "content", "content": code},
)
assert completion_response_1.id == completion_response_2.id
completion_response_3 = litellm.completion(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": "What is the first name of the user?"},
],
prediction={"type": "content", "content": code + "FirstName"},
)
assert completion_response_3.id != completion_response_1.id