diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index 3aeccebf60..bf6f78d187 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -1267,7 +1267,7 @@
         "max_input_tokens": 4097,
         "input_cost_per_token": 0.0000015,
         "output_cost_per_token": 0.000002,
-        "litellm_provider": "text-completion-openai",
+        "litellm_provider": "azure_text",
         "mode": "completion"
     },
     "azure/gpt-35-turbo-instruct": {
@@ -1275,7 +1275,7 @@
         "max_input_tokens": 4097,
         "input_cost_per_token": 0.0000015,
         "output_cost_per_token": 0.000002,
-        "litellm_provider": "text-completion-openai",
+        "litellm_provider": "azure_text",
         "mode": "completion"
     },
     "azure/gpt-35-turbo-instruct-0914": {
@@ -1283,7 +1283,7 @@
         "max_input_tokens": 4097,
         "input_cost_per_token": 0.0000015,
         "output_cost_per_token": 0.000002,
-        "litellm_provider": "text-completion-openai",
+        "litellm_provider": "azure_text",
         "mode": "completion"
     },
     "azure/mistral-large-latest": {
diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 2861177f1b..acb66345ed 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -1,7 +1,7 @@
 model_list:
-  - model_name: openai/o1-preview
+  - model_name: openai/*
     litellm_params:
-      model: openai/o1-preview
+      model: openai/*
       api_key: os.environ/OPENAI_API_KEY
diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json
index 3aeccebf60..bf6f78d187 100644
--- a/model_prices_and_context_window.json
+++ b/model_prices_and_context_window.json
@@ -1267,7 +1267,7 @@
         "max_input_tokens": 4097,
         "input_cost_per_token": 0.0000015,
         "output_cost_per_token": 0.000002,
-        "litellm_provider": "text-completion-openai",
+        "litellm_provider": "azure_text",
         "mode": "completion"
     },
     "azure/gpt-35-turbo-instruct": {
@@ -1275,7 +1275,7 @@
         "max_input_tokens": 4097,
         "input_cost_per_token": 0.0000015,
         "output_cost_per_token": 0.000002,
-        "litellm_provider": "text-completion-openai",
+        "litellm_provider": "azure_text",
         "mode": "completion"
     },
     "azure/gpt-35-turbo-instruct-0914": {
@@ -1283,7 +1283,7 @@
         "max_input_tokens": 4097,
         "input_cost_per_token": 0.0000015,
         "output_cost_per_token": 0.000002,
-        "litellm_provider": "text-completion-openai",
+        "litellm_provider": "azure_text",
         "mode": "completion"
     },
     "azure/mistral-large-latest": {
diff --git a/ui/litellm-dashboard/src/components/chat_ui.tsx b/ui/litellm-dashboard/src/components/chat_ui.tsx
index 8764dbb8c4..e0925c0712 100644
--- a/ui/litellm-dashboard/src/components/chat_ui.tsx
+++ b/ui/litellm-dashboard/src/components/chat_ui.tsx
@@ -115,36 +115,27 @@ const ChatUI: React.FC<ChatUIProps> = ({
       console.log("model_info:", fetchedAvailableModels);
 
       if (fetchedAvailableModels?.data.length > 0) {
-        const options = fetchedAvailableModels["data"].map((item: { id: string }) => ({
-          value: item.id,
-          label: item.id
-        }));
-
-        // Now, 'options' contains the list you wanted
-        console.log(options); // You can log it to verify the list
+        // Create a Map to store unique models using the model ID as key
+        const uniqueModelsMap = new Map();
+
+        fetchedAvailableModels["data"].forEach((item: { id: string }) => {
+          uniqueModelsMap.set(item.id, {
+            value: item.id,
+            label: item.id
+          });
+        });
 
-        // if options.length > 0, only store unique values
-        if (options.length > 0) {
-          const uniqueModels = Array.from(new Set(options));
+        // Convert Map values back to array
+        const uniqueModels = Array.from(uniqueModelsMap.values());
 
-          console.log("Unique models:", uniqueModels);
+        // Sort models alphabetically
+        uniqueModels.sort((a, b) => a.label.localeCompare(b.label));
 
-          // sort uniqueModels alphabetically
-          uniqueModels.sort((a: any, b: any) => a.label.localeCompare(b.label));
-
-          console.log("Model info:", modelInfo);
-
-          // setModelInfo(options) should be inside the if block to avoid setting it when no data is available
-          setModelInfo(uniqueModels);
-        }
-
-        setSelectedModel(fetchedAvailableModels.data[0].id);
+        setModelInfo(uniqueModels);
+        setSelectedModel(uniqueModels[0].value);
       }
     } catch (error) {
       console.error("Error fetching model info:", error);
-      // Handle error as needed
     }
   };
@@ -242,10 +233,8 @@ const ChatUI: React.FC<ChatUIProps> = ({
             placeholder="Select a Model"
            onChange={onChange}
             options={modelInfo}
-            style={{ width: "200px" }}
-
-
-
+            style={{ width: "350px" }}
+            showSearch={true}
           />
@@ -270,7 +259,15 @@ const ChatUI: React.FC<ChatUIProps> = ({
             <TableBody>
               {chatHistory.map((message, index) => (
                 <TableRow key={index}>
-                  <TableCell>{`${message.role}: ${message.content}`}</TableCell>
+                  <TableCell>
+                    <div style={{
+                      whiteSpace: "pre-wrap",
+                      wordBreak: "break-word",
+                      maxWidth: "100%"
+                    }}>
+                      <strong>{message.role}:</strong> {message.content}
+                    </div>
+                  </TableCell>
                 </TableRow>
               ))}
             </TableBody>
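
Note on the chat_ui.tsx change: the old dedup, `Array.from(new Set(options))`, could never drop anything — each `.map()` call produces a fresh option object per entry, and a Set compares objects by reference, so every option looked unique. Keying a Map on the model ID is what actually collapses duplicates. A self-contained sketch of the new logic (the sample IDs below are illustrative, not from this PR):

// dedupe_models.ts — minimal sketch of the Map-based dedup-and-sort above
interface ModelOption {
  value: string;
  label: string;
}

function buildModelOptions(data: { id: string }[]): ModelOption[] {
  // A Map keyed on the model ID keeps one entry per ID — the dedup
  // that a Set of distinct option objects failed to perform.
  const uniqueModelsMap = new Map<string, ModelOption>();
  data.forEach((item) => {
    uniqueModelsMap.set(item.id, { value: item.id, label: item.id });
  });

  // Back to an array, sorted alphabetically for the dropdown.
  const uniqueModels = Array.from(uniqueModelsMap.values());
  uniqueModels.sort((a, b) => a.label.localeCompare(b.label));
  return uniqueModels;
}

// Example: duplicates collapse and the result is sorted.
console.log(buildModelOptions([{ id: "gpt-4" }, { id: "gpt-4" }, { id: "azure/gpt-35-turbo-instruct" }]));
// -> [ { value: "azure/gpt-35-turbo-instruct", ... }, { value: "gpt-4", ... } ]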
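Note on the proxy_config.yaml change: with the wildcard entry, the proxy routes any `openai/...` model name it receives, rather than only `openai/o1-preview`. A minimal client call against a locally running proxy (the URL, key variable, and model name are illustrative; port 4000 is LiteLLM's default):

// call_proxy.ts — assumes Node 18+ (global fetch) and a proxy key in LITELLM_KEY
const resp = await fetch("http://0.0.0.0:4000/chat/completions", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.LITELLM_KEY}`,
  },
  body: JSON.stringify({
    // Any openai/* model now matches the wildcard route.
    model: "openai/gpt-4o-mini",
    messages: [{ role: "user", content: "Hello" }],
  }),
});
console.log(await resp.json());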