(Admin UI) - Test Key Tab - Allow typing in model name + Add wrapping for text response (#7347)

* ui fix - allow searching model list + fix bug on filtering

* qa fix - use correct provider name for azure_text

* ui wrap content onto next line
This commit is contained in:
Ishaan Jaff 2024-12-21 13:14:01 -08:00 committed by GitHub
parent bb9171e037
commit b52783445e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 34 additions and 37 deletions

View file

@@ -1267,7 +1267,7 @@
"max_input_tokens": 4097, "max_input_tokens": 4097,
"input_cost_per_token": 0.0000015, "input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002, "output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai", "litellm_provider": "azure_text",
"mode": "completion" "mode": "completion"
}, },
"azure/gpt-35-turbo-instruct": { "azure/gpt-35-turbo-instruct": {
@@ -1275,7 +1275,7 @@
"max_input_tokens": 4097, "max_input_tokens": 4097,
"input_cost_per_token": 0.0000015, "input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002, "output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai", "litellm_provider": "azure_text",
"mode": "completion" "mode": "completion"
}, },
"azure/gpt-35-turbo-instruct-0914": { "azure/gpt-35-turbo-instruct-0914": {
@@ -1283,7 +1283,7 @@
"max_input_tokens": 4097, "max_input_tokens": 4097,
"input_cost_per_token": 0.0000015, "input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002, "output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai", "litellm_provider": "azure_text",
"mode": "completion" "mode": "completion"
}, },
"azure/mistral-large-latest": { "azure/mistral-large-latest": {

View file

@@ -1,7 +1,7 @@
model_list: model_list:
- model_name: openai/o1-preview - model_name: openai/*
litellm_params: litellm_params:
model: openai/o1-preview model: openai/*
api_key: os.environ/OPENAI_API_KEY api_key: os.environ/OPENAI_API_KEY

View file

@@ -1267,7 +1267,7 @@
"max_input_tokens": 4097, "max_input_tokens": 4097,
"input_cost_per_token": 0.0000015, "input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002, "output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai", "litellm_provider": "azure_text",
"mode": "completion" "mode": "completion"
}, },
"azure/gpt-35-turbo-instruct": { "azure/gpt-35-turbo-instruct": {
@@ -1275,7 +1275,7 @@
"max_input_tokens": 4097, "max_input_tokens": 4097,
"input_cost_per_token": 0.0000015, "input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002, "output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai", "litellm_provider": "azure_text",
"mode": "completion" "mode": "completion"
}, },
"azure/gpt-35-turbo-instruct-0914": { "azure/gpt-35-turbo-instruct-0914": {
@@ -1283,7 +1283,7 @@
"max_input_tokens": 4097, "max_input_tokens": 4097,
"input_cost_per_token": 0.0000015, "input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002, "output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai", "litellm_provider": "azure_text",
"mode": "completion" "mode": "completion"
}, },
"azure/mistral-large-latest": { "azure/mistral-large-latest": {

View file

@@ -115,36 +115,27 @@ const ChatUI: React.FC&lt;ChatUIProps&gt; = ({
console.log("model_info:", fetchedAvailableModels); console.log("model_info:", fetchedAvailableModels);
if (fetchedAvailableModels?.data.length > 0) { if (fetchedAvailableModels?.data.length > 0) {
const options = fetchedAvailableModels["data"].map((item: { id: string }) => ({ // Create a Map to store unique models using the model ID as key
value: item.id, const uniqueModelsMap = new Map();
label: item.id
})); fetchedAvailableModels["data"].forEach((item: { id: string }) => {
uniqueModelsMap.set(item.id, {
// Now, 'options' contains the list you wanted value: item.id,
console.log(options); // You can log it to verify the list label: item.id
});
});
// if options.length > 0, only store unique values // Convert Map values back to array
if (options.length > 0) { const uniqueModels = Array.from(uniqueModelsMap.values());
const uniqueModels = Array.from(new Set(options));
console.log("Unique models:", uniqueModels); // Sort models alphabetically
uniqueModels.sort((a, b) => a.label.localeCompare(b.label));
// sort uniqueModels alphabetically setModelInfo(uniqueModels);
uniqueModels.sort((a: any, b: any) => a.label.localeCompare(b.label)); setSelectedModel(uniqueModels[0].value);
console.log("Model info:", modelInfo);
// setModelInfo(options) should be inside the if block to avoid setting it when no data is available
setModelInfo(uniqueModels);
}
setSelectedModel(fetchedAvailableModels.data[0].id);
} }
} catch (error) { } catch (error) {
console.error("Error fetching model info:", error); console.error("Error fetching model info:", error);
// Handle error as needed
} }
}; };
@@ -242,10 +233,8 @@ const ChatUI: React.FC&lt;ChatUIProps&gt; = ({
placeholder="Select a Model" placeholder="Select a Model"
onChange={onChange} onChange={onChange}
options={modelInfo} options={modelInfo}
style={{ width: "200px" }} style={{ width: "350px" }}
showSearch={true}
/> />
</Col> </Col>
</Grid> </Grid>
@@ -270,7 +259,15 @@ const ChatUI: React.FC&lt;ChatUIProps&gt; = ({
<TableBody> <TableBody>
{chatHistory.map((message, index) => ( {chatHistory.map((message, index) => (
<TableRow key={index}> <TableRow key={index}>
<TableCell>{`${message.role}: ${message.content}`}</TableCell> <TableCell>
<div style={{
whiteSpace: "pre-wrap",
wordBreak: "break-word",
maxWidth: "100%"
}}>
<strong>{message.role}:</strong> {message.content}
</div>
</TableCell>
</TableRow> </TableRow>
))} ))}
</TableBody> </TableBody>