diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md
index d40ab0676..f6f987590 100644
--- a/docs/my-website/docs/providers/openai.md
+++ b/docs/my-website/docs/providers/openai.md
@@ -163,6 +163,7 @@ os.environ["OPENAI_API_BASE"] = "openaiai-api-base" # OPTIONAL
 
 | Model Name | Function Call |
 |-----------------------|-----------------------------------------------------------------|
+| gpt-4-turbo-preview | `response = completion(model="gpt-4-turbo-preview", messages=messages)` |
 | gpt-4-0125-preview | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
 | gpt-4-1106-preview | `response = completion(model="gpt-4-1106-preview", messages=messages)` |
 | gpt-3.5-turbo-1106 | `response = completion(model="gpt-3.5-turbo-1106", messages=messages)` |
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index a4c851c84..b5809c741 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -10,9 +10,9 @@
         "supports_function_calling": true
     },
     "gpt-4-turbo-preview": {
-        "max_tokens": 4096,
-        "max_input_tokens": 8192,
-        "max_output_tokens": 4096,
+        "max_tokens": 4096,
+        "max_input_tokens": 128000,
+        "max_output_tokens": 4096,
         "input_cost_per_token": 0.00001,
         "output_cost_per_token": 0.00003,
         "litellm_provider": "openai",
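
For reference, the new table row corresponds to a call like the sketch below. It is a minimal example, not part of the diff: it assumes a valid `OPENAI_API_KEY` in the environment, and the messages payload is illustrative only.

```python
# Minimal sketch of the usage documented in the new table row.
# Assumes OPENAI_API_KEY is set to a real key; the prompt is illustrative.
import os

from litellm import completion

os.environ["OPENAI_API_KEY"] = "your-api-key"  # placeholder, not a real key

messages = [{"role": "user", "content": "Hello, how are you?"}]

# "gpt-4-turbo-preview" is the OpenAI alias for the latest GPT-4 Turbo
# preview snapshot; the JSON change above records its 128k input context.
response = completion(model="gpt-4-turbo-preview", messages=messages)
print(response)
```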