From 96e995f53b1aa992e37f72763f44af605e284c76 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 16 Apr 2024 17:42:57 -0700
Subject: [PATCH] docs(openai.md): add gpt-4-turbo to vision docs

---
 docs/my-website/docs/providers/openai.md            | 2 ++
 litellm/model_prices_and_context_window_backup.json | 6 ++++--
 model_prices_and_context_window.json                | 6 ++++--
 3 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md
index f6f987590..a8fe541fc 100644
--- a/docs/my-website/docs/providers/openai.md
+++ b/docs/my-website/docs/providers/openai.md
@@ -163,6 +163,7 @@ os.environ["OPENAI_API_BASE"] = "openaiai-api-base" # OPTIONAL
 
 | Model Name | Function Call |
 |-----------------------|-----------------------------------------------------------------|
+| gpt-4-turbo | `response = completion(model="gpt-4-turbo", messages=messages)` |
 | gpt-4-turbo-preview | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
 | gpt-4-0125-preview | `response = completion(model="gpt-4-0125-preview", messages=messages)` |
 | gpt-4-1106-preview | `response = completion(model="gpt-4-1106-preview", messages=messages)` |
@@ -185,6 +186,7 @@ These also support the `OPENAI_API_BASE` environment variable, which can be used
 ## OpenAI Vision Models
 | Model Name | Function Call |
 |-----------------------|-----------------------------------------------------------------|
+| gpt-4-turbo | `response = completion(model="gpt-4-turbo", messages=messages)` |
 | gpt-4-vision-preview | `response = completion(model="gpt-4-vision-preview", messages=messages)` |
 
 #### Usage
diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 3ace04f70..2655ab9db 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -75,7 +75,8 @@
         "litellm_provider": "openai",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
     },
     "gpt-4-turbo-2024-04-09": {
         "max_tokens": 4096,
@@ -86,7 +87,8 @@
         "litellm_provider": "openai",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
     },
     "gpt-4-1106-preview": {
         "max_tokens": 4096,
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 3ace04f70..2655ab9db 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -75,7 +75,8 @@
         "litellm_provider": "openai",
         "mode": "chat",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
     },
     "gpt-4-turbo-2024-04-09": {
         "max_tokens": 4096,
@@ -86,7 +87,8 @@
         "litellm_provider": "openai",
        "mode": "chat",
         "supports_function_calling": true,
-        "supports_parallel_function_calling": true
+        "supports_parallel_function_calling": true,
+        "supports_vision": true
     },
     "gpt-4-1106-preview": {
         "max_tokens": 4096,
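
For reviewers, a minimal sketch of how the newly documented `gpt-4-turbo` vision support would be exercised through litellm's `completion()`, mirroring the existing `gpt-4-vision-preview` usage example in the docs. The API key and image URL are placeholders, not part of this patch:

```python
# Sketch only: assumes OPENAI_API_KEY is set and the image URL is reachable.
import os
from litellm import completion

os.environ["OPENAI_API_KEY"] = "your-api-key"  # placeholder

response = completion(
    model="gpt-4-turbo",  # now listed as a vision-capable model in these docs
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this image?"},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/sample.png"},  # placeholder image
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)
```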