mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
UI - Allow admin to control default model access for internal users (#8912)
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 36s
All checks were successful
Read Version from pyproject.toml / read-version (push) Successful in 36s
* fix(create_user_button.tsx): allow admin to set models user has access to, on invite Enables controlling model access on invite * feat(auth_checks.py): enforce 'no-model-access' special model name on backend prevent user from calling models if default key has no model access * fix(chat_ui.tsx): allow user to input custom model * fix(chat_ui.tsx): pull available models based on models key has access to * style(create_user_button.tsx): move default model inside 'personal key creation' accordion * fix(chat_ui.tsx): fix linting error * test(test_auth_checks.py): add unit-test for special model name * docs(internal_user_endpoints.py): update docstring * fix test_moderations_bad_model * Litellm dev 02 27 2025 p6 (#8891) * fix(http_parsing_utils.py): orjson can throw errors on some emoji's in text, default to json.loads * fix(sagemaker/handler.py): support passing model id on async streaming * fix(litellm_pre_call_utils.py): Fixes https://github.com/BerriAI/litellm/issues/7237 * Fix calling claude via invoke route + response_format support for claude on invoke route (#8908) * fix(anthropic_claude3_transformation.py): fix amazon anthropic claude 3 tool calling transformation on invoke route move to using anthropic config as base * fix(utils.py): expose anthropic config via providerconfigmanager * fix(llm_http_handler.py): support json mode on async completion calls * fix(invoke_handler/make_call): support json mode for anthropic called via bedrock invoke * fix(anthropic/): handle 'response_format: {"type": "text"}` + migrate amazon claude 3 invoke config to inherit from anthropic config Prevents error when passing in 'response_format: {"type": "text"} * test: fix test * fix(utils.py): fix base invoke provider check * fix(anthropic_claude3_transformation.py): don't pass 'stream' param * fix: fix linting errors * fix(converse_transformation.py): handle response_format type=text for converse * converse_transformation: pass 'description' if set in response_format (#8907) * 
test(test_bedrock_completion.py): e2e test ensuring tool description is passed in * fix(converse_transformation.py): pass description, if set * fix(transformation.py): Fixes https://github.com/BerriAI/litellm/issues/8767#issuecomment-2689887663 * Fix bedrock passing `response_format: {"type": "text"}` (#8900) * fix(converse_transformation.py): ignore type: text, value in response_format no-op for bedrock * fix(converse_transformation.py): handle adding response format value to tools * fix(base_invoke_transformation.py): fix 'get_bedrock_invoke_provider' to handle cross-region-inferencing models * test(test_bedrock_completion.py): add unit testing for bedrock invoke provider logic * test: update test * fix(exception_mapping_utils.py): add context window exceeded error handling for databricks provider route * fix(fireworks_ai/): support passing tools + response_format together * fix: cleanup * fix(base_invoke_transformation.py): fix imports * (Feat) - Show Error Logs on LiteLLM UI (#8904) * fix test_moderations_bad_model * use async_post_call_failure_hook * basic logging errors in DB * show status on ui * show status on ui * ui show request / response side by side * stash fixes * working, track raw request * track error info in metadata * fix showing error / request / response logs * show traceback on error viewer * ui with traceback of error * fix async_post_call_failure_hook * fix(http_parsing_utils.py): orjson can throw errors on some emoji's in text, default to json.loads * test_get_error_information * fix code quality * rename proxy track cost callback test * _should_store_errors_in_spend_logs * feature flag error logs * Revert "_should_store_errors_in_spend_logs" This reverts commit7f345df477
. * Revert "feature flag error logs" This reverts commit 0e90c022bb
. * test_spend_logs_payload * fix OTEL log_db_metrics * fix import json * fix ui linting error * test_async_post_call_failure_hook * test_chat_completion_bad_model_with_spend_logs --------- Co-authored-by: Krrish Dholakia <krrishdholakia@gmail.com> * ui new build * test_chat_completion_bad_model_with_spend_logs * docs(release_cycle.md): document release cycle * bump: version 1.62.0 → 1.62.1 --------- Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
This commit is contained in:
parent
fecc02dd45
commit
c1527ebf52
6 changed files with 109 additions and 8 deletions
|
@ -93,20 +93,27 @@ const ChatUI: React.FC<ChatUIProps> = ({
|
|||
const [selectedModel, setSelectedModel] = useState<string | undefined>(
|
||||
undefined
|
||||
);
|
||||
const [showCustomModelInput, setShowCustomModelInput] = useState<boolean>(false);
|
||||
const [modelInfo, setModelInfo] = useState<any[]>([]);
|
||||
const customModelTimeout = useRef<NodeJS.Timeout | null>(null);
|
||||
|
||||
const chatEndRef = useRef<HTMLDivElement>(null);
|
||||
|
||||
useEffect(() => {
|
||||
if (!accessToken || !token || !userRole || !userID) {
|
||||
let useApiKey = apiKeySource === 'session' ? accessToken : apiKey;
|
||||
console.log("useApiKey:", useApiKey);
|
||||
if (!useApiKey || !token || !userRole || !userID) {
|
||||
console.log("useApiKey or token or userRole or userID is missing = ", useApiKey, token, userRole, userID);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Fetch model info and set the default selected model
|
||||
const fetchModelInfo = async () => {
|
||||
try {
|
||||
const fetchedAvailableModels = await modelAvailableCall(
|
||||
accessToken,
|
||||
useApiKey ?? '', // Use empty string if useApiKey is null,
|
||||
userID,
|
||||
userRole
|
||||
);
|
||||
|
@ -139,7 +146,7 @@ const ChatUI: React.FC<ChatUIProps> = ({
|
|||
};
|
||||
|
||||
fetchModelInfo();
|
||||
}, [accessToken, userID, userRole]);
|
||||
}, [accessToken, userID, userRole, apiKeySource, apiKey]);
|
||||
|
||||
|
||||
useEffect(() => {
|
||||
|
@ -234,6 +241,7 @@ const ChatUI: React.FC<ChatUIProps> = ({
|
|||
const onChange = (value: string) => {
|
||||
console.log(`selected ${value}`);
|
||||
setSelectedModel(value);
|
||||
setShowCustomModelInput(value === 'custom');
|
||||
};
|
||||
|
||||
return (
|
||||
|
@ -276,10 +284,29 @@ const ChatUI: React.FC<ChatUIProps> = ({
|
|||
<Select
|
||||
placeholder="Select a Model"
|
||||
onChange={onChange}
|
||||
options={modelInfo}
|
||||
options={[
|
||||
...modelInfo,
|
||||
{ value: 'custom', label: 'Enter custom model' }
|
||||
]}
|
||||
style={{ width: "350px" }}
|
||||
showSearch={true}
|
||||
/>
|
||||
{showCustomModelInput && (
|
||||
<TextInput
|
||||
className="mt-2"
|
||||
placeholder="Enter custom model name"
|
||||
onValueChange={(value) => {
|
||||
// Using setTimeout to create a simple debounce effect
|
||||
if (customModelTimeout.current) {
|
||||
clearTimeout(customModelTimeout.current);
|
||||
}
|
||||
|
||||
customModelTimeout.current = setTimeout(() => {
|
||||
setSelectedModel(value);
|
||||
}, 500); // 500ms delay after typing stops
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
</Col>
|
||||
</Grid>
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue