diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md
index ccb2c418b..156c8cefc 100644
--- a/docs/my-website/docs/providers/openai.md
+++ b/docs/my-website/docs/providers/openai.md
@@ -159,6 +159,7 @@ os.environ["OPENAI_API_BASE"] = "openaiai-api-base" # OPTIONAL
 | Model Name            | Function Call                                                           |
 |-----------------------|-------------------------------------------------------------------------|
+| gpt-4-turbo-preview   | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
 | gpt-4-0125-preview    | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
 | gpt-4-1106-preview    | `response = completion(model="gpt-4-1106-preview", messages=messages)` |
 | gpt-3.5-turbo-1106    | `response = completion(model="gpt-3.5-turbo-1106", messages=messages)` |
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index d3eb93d72..f79a0cd0e 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -10,9 +10,9 @@
         "supports_function_calling": true
     },
     "gpt-4-turbo-preview": {
-        "max_tokens": 4096,
-        "max_input_tokens": 8192,
-        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
         "input_cost_per_token": 0.00001,
         "output_cost_per_token": 0.00003,
         "litellm_provider": "openai",
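
A quick way to sanity-check this change once applied is to read the model metadata back and call the newly documented alias. This is a minimal sketch, not part of the patch: it assumes `litellm.get_model_info()` serves entries from `model_prices_and_context_window.json` and that the returned key names (`max_input_tokens`, `max_output_tokens`) match the JSON; the API key value is a placeholder.

```python
import os
import litellm
from litellm import completion

os.environ["OPENAI_API_KEY"] = "your-api-key"  # placeholder, replace with a real key

# Read the registry entry for the model; with this patch applied,
# max_input_tokens should report the 128k context window.
info = litellm.get_model_info("gpt-4-turbo-preview")
print(info.get("max_input_tokens"))   # expected: 128000
print(info.get("max_output_tokens"))  # expected: 4096

# Exercise the alias the same way the docs table shows for other models.
messages = [{"role": "user", "content": "Say hello in one sentence."}]
response = completion(model="gpt-4-turbo-preview", messages=messages)
print(response.choices[0].message.content)
```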