Merge pull request #3660 from BerriAI/litellm_proxy_ui_general_settings

feat(proxy_server.py): Enabling Admin to control general settings on proxy ui
This commit is contained in:
Krish Dholakia 2024-05-15 20:36:42 -07:00 committed by GitHub
commit d294e26fdb
7 changed files with 1099 additions and 283 deletions

View file

@ -696,6 +696,25 @@ class DynamoDBArgs(LiteLLMBase):
assume_role_aws_session_name: Optional[str] = None
class ConfigFieldUpdate(LiteLLMBase):
    """Request body for `/config/field/update` — set one `general_settings` field."""

    field_name: str  # name of the general_settings field to update
    field_value: Any  # new value; the endpoint validates it against ConfigGeneralSettings
    config_type: Literal["general_settings"]  # only general_settings is currently supported
class ConfigFieldDelete(LiteLLMBase):
    """Request body for `/config/field/delete` — remove one `general_settings` field from the DB."""

    config_type: Literal["general_settings"]  # only general_settings is currently supported
    field_name: str  # name of the general_settings field to delete
class ConfigList(LiteLLMBase):
    """One row returned by `/config/list` — a settable field plus its current state."""

    field_name: str  # name of the general_settings field
    field_type: str  # UI-facing type label (e.g. "Integer")
    field_description: str  # human-readable description shown in the admin UI
    field_value: Any  # current in-memory value (may be None if unset)
    stored_in_db: Optional[bool]  # True = value persisted in DB, False = from config.yaml only, None = unset
class ConfigGeneralSettings(LiteLLMBase):
"""
Documents all the fields supported by `general_settings` in config.yaml
@ -743,7 +762,11 @@ class ConfigGeneralSettings(LiteLLMBase):
description="override user_api_key_auth with your own auth script - https://docs.litellm.ai/docs/proxy/virtual_keys#custom-auth",
)
max_parallel_requests: Optional[int] = Field(
None, description="maximum parallel requests for each api key"
None,
description="maximum parallel requests for each api key",
)
global_max_parallel_requests: Optional[int] = Field(
None, description="global max parallel requests to allow for a proxy instance."
)
infer_model_from_keys: Optional[bool] = Field(
None,

View file

@ -79,6 +79,9 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
max_parallel_requests = user_api_key_dict.max_parallel_requests
if max_parallel_requests is None:
max_parallel_requests = sys.maxsize
global_max_parallel_requests = data.get("metadata", {}).get(
"global_max_parallel_requests", None
)
tpm_limit = getattr(user_api_key_dict, "tpm_limit", sys.maxsize)
if tpm_limit is None:
tpm_limit = sys.maxsize
@ -91,6 +94,24 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
# Setup values
# ------------
if global_max_parallel_requests is not None:
# get value from cache
_key = "global_max_parallel_requests"
current_global_requests = await cache.async_get_cache(
key=_key, local_only=True
)
# check if below limit
if current_global_requests is None:
current_global_requests = 1
# if above -> raise error
if current_global_requests >= global_max_parallel_requests:
raise HTTPException(
status_code=429, detail="Max parallel request limit reached."
)
# if below -> increment
else:
await cache.async_increment_cache(key=_key, value=1, local_only=True)
current_date = datetime.now().strftime("%Y-%m-%d")
current_hour = datetime.now().strftime("%H")
current_minute = datetime.now().strftime("%M")
@ -207,6 +228,9 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
try:
self.print_verbose(f"INSIDE parallel request limiter ASYNC SUCCESS LOGGING")
global_max_parallel_requests = kwargs["litellm_params"]["metadata"].get(
"global_max_parallel_requests", None
)
user_api_key = kwargs["litellm_params"]["metadata"]["user_api_key"]
user_api_key_user_id = kwargs["litellm_params"]["metadata"].get(
"user_api_key_user_id", None
@ -222,6 +246,14 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
# Setup values
# ------------
if global_max_parallel_requests is not None:
# get value from cache
_key = "global_max_parallel_requests"
# decrement
await self.user_api_key_cache.async_increment_cache(
key=_key, value=-1, local_only=True
)
current_date = datetime.now().strftime("%Y-%m-%d")
current_hour = datetime.now().strftime("%H")
current_minute = datetime.now().strftime("%M")
@ -336,6 +368,9 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
async def async_log_failure_event(self, kwargs, response_obj, start_time, end_time):
try:
self.print_verbose(f"Inside Max Parallel Request Failure Hook")
global_max_parallel_requests = kwargs["litellm_params"]["metadata"].get(
"global_max_parallel_requests", None
)
user_api_key = (
kwargs["litellm_params"].get("metadata", {}).get("user_api_key", None)
)
@ -347,17 +382,26 @@ class _PROXY_MaxParallelRequestsHandler(CustomLogger):
return
## decrement call count if call failed
if (
hasattr(kwargs["exception"], "status_code")
and kwargs["exception"].status_code == 429
and "Max parallel request limit reached" in str(kwargs["exception"])
):
if "Max parallel request limit reached" in str(kwargs["exception"]):
pass # ignore failed calls due to max limit being reached
else:
# ------------
# Setup values
# ------------
if global_max_parallel_requests is not None:
# get value from cache
_key = "global_max_parallel_requests"
current_global_requests = (
await self.user_api_key_cache.async_get_cache(
key=_key, local_only=True
)
)
# decrement
await self.user_api_key_cache.async_increment_cache(
key=_key, value=-1, local_only=True
)
current_date = datetime.now().strftime("%Y-%m-%d")
current_hour = datetime.now().strftime("%H")
current_minute = datetime.now().strftime("%M")

View file

@ -234,6 +234,7 @@ class SpecialModelNames(enum.Enum):
class CommonProxyErrors(enum.Enum):
db_not_connected_error = "DB not connected"
no_llm_router = "No models configured on proxy"
not_allowed_access = "Admin-only endpoint. Not allowed to access this."
@app.exception_handler(ProxyException)
@ -566,9 +567,9 @@ async def user_api_key_auth(
#### ELSE ####
if master_key is None:
if isinstance(api_key, str):
return UserAPIKeyAuth(api_key=api_key)
return UserAPIKeyAuth(api_key=api_key, user_role="proxy_admin")
else:
return UserAPIKeyAuth()
return UserAPIKeyAuth(user_role="proxy_admin")
elif api_key is None: # only require api key if master key is set
raise Exception("No api key passed in.")
elif api_key == "":
@ -659,6 +660,7 @@ async def user_api_key_auth(
verbose_proxy_logger.debug("Token from db: %s", valid_token)
elif valid_token is not None:
verbose_proxy_logger.debug("API Key Cache Hit!")
user_id_information = None
if valid_token:
# Got Valid Token from Cache, DB
@ -1187,6 +1189,17 @@ async def user_api_key_auth(
# No token was found when looking up in the DB
raise Exception("Invalid token passed")
if valid_token_dict is not None:
if user_id_information is not None and _is_user_proxy_admin(
user_id_information
):
return UserAPIKeyAuth(
api_key=api_key, user_role="proxy_admin", **valid_token_dict
)
elif _has_user_setup_sso() and route in LiteLLMRoutes.sso_only_routes.value:
return UserAPIKeyAuth(
api_key=api_key, user_role="app_owner", **valid_token_dict
)
else:
return UserAPIKeyAuth(api_key=api_key, **valid_token_dict)
else:
raise Exception()
@ -2795,7 +2808,19 @@ class ProxyConfig:
"Error setting env variable: %s - %s", k, str(e)
)
# general_settings
# router settings
if llm_router is not None and prisma_client is not None:
db_router_settings = await prisma_client.db.litellm_config.find_first(
where={"param_name": "router_settings"}
)
if (
db_router_settings is not None
and db_router_settings.param_value is not None
):
_router_settings = db_router_settings.param_value
llm_router.update_settings(**_router_settings)
## ALERTING ## [TODO] move this to the _update_general_settings() block
_general_settings = config_data.get("general_settings", {})
if "alerting" in _general_settings:
general_settings["alerting"] = _general_settings["alerting"]
@ -2819,17 +2844,24 @@ class ProxyConfig:
alert_to_webhook_url=general_settings["alert_to_webhook_url"]
)
# router settings
if llm_router is not None and prisma_client is not None:
db_router_settings = await prisma_client.db.litellm_config.find_first(
where={"param_name": "router_settings"}
)
if (
db_router_settings is not None
and db_router_settings.param_value is not None
):
_router_settings = db_router_settings.param_value
llm_router.update_settings(**_router_settings)
async def _update_general_settings(self, db_general_settings: Optional[Json]):
"""
Pull from DB, read general settings value
"""
global general_settings
if db_general_settings is None:
return
_general_settings = dict(db_general_settings)
## MAX PARALLEL REQUESTS ##
if "max_parallel_requests" in _general_settings:
general_settings["max_parallel_requests"] = _general_settings[
"max_parallel_requests"
]
if "global_max_parallel_requests" in _general_settings:
general_settings["global_max_parallel_requests"] = _general_settings[
"global_max_parallel_requests"
]
async def add_deployment(
self,
@ -2837,7 +2869,7 @@ class ProxyConfig:
proxy_logging_obj: ProxyLogging,
):
"""
- Check db for new models (last 10 most recently updated)
- Check db for new models
- Check if model id's in router already
- If not, add to router
"""
@ -2850,9 +2882,21 @@ class ProxyConfig:
)
verbose_proxy_logger.debug(f"llm_router: {llm_router}")
new_models = await prisma_client.db.litellm_proxymodeltable.find_many()
# update llm router
await self._update_llm_router(
new_models=new_models, proxy_logging_obj=proxy_logging_obj
)
db_general_settings = await prisma_client.db.litellm_config.find_first(
where={"param_name": "general_settings"}
)
# update general settings
if db_general_settings is not None:
await self._update_general_settings(
db_general_settings=db_general_settings.param_value,
)
except Exception as e:
verbose_proxy_logger.error(
"{}\nTraceback:{}".format(str(e), traceback.format_exc())
@ -3052,27 +3096,6 @@ async def generate_key_helper_fn(
data=key_data, table_name="key"
)
key_data["token_id"] = getattr(create_key_response, "token", None)
elif custom_db_client is not None:
if table_name is None or table_name == "user":
## CREATE USER (If necessary)
verbose_proxy_logger.debug(
"CustomDBClient: Creating User= %s", user_data
)
user_row = await custom_db_client.insert_data(
value=user_data, table_name="user"
)
if user_row is None:
# GET USER ROW
user_row = await custom_db_client.get_data(
key=user_id, table_name="user" # type: ignore
)
## use default user model list if no key-specific model list provided
if len(user_row.models) > 0 and len(key_data["models"]) == 0: # type: ignore
key_data["models"] = user_row.models
## CREATE KEY
verbose_proxy_logger.debug("CustomDBClient: Creating Key= %s", key_data)
await custom_db_client.insert_data(value=key_data, table_name="key")
except Exception as e:
traceback.print_exc()
if isinstance(e, HTTPException):
@ -3668,6 +3691,9 @@ async def chat_completion(
data["metadata"]["user_api_key_alias"] = getattr(
user_api_key_dict, "key_alias", None
)
data["metadata"]["global_max_parallel_requests"] = general_settings.get(
"global_max_parallel_requests", None
)
data["metadata"]["user_api_key_user_id"] = user_api_key_dict.user_id
data["metadata"]["user_api_key_org_id"] = user_api_key_dict.org_id
data["metadata"]["user_api_key_team_id"] = getattr(
@ -3938,6 +3964,9 @@ async def completion(
data["metadata"]["user_api_key_team_id"] = getattr(
user_api_key_dict, "team_id", None
)
data["metadata"]["global_max_parallel_requests"] = general_settings.get(
"global_max_parallel_requests", None
)
data["metadata"]["user_api_key_team_alias"] = getattr(
user_api_key_dict, "team_alias", None
)
@ -4134,6 +4163,9 @@ async def embeddings(
data["metadata"]["user_api_key_alias"] = getattr(
user_api_key_dict, "key_alias", None
)
data["metadata"]["global_max_parallel_requests"] = general_settings.get(
"global_max_parallel_requests", None
)
data["metadata"]["user_api_key_user_id"] = user_api_key_dict.user_id
data["metadata"]["user_api_key_team_id"] = getattr(
user_api_key_dict, "team_id", None
@ -4338,6 +4370,9 @@ async def image_generation(
data["metadata"]["user_api_key_alias"] = getattr(
user_api_key_dict, "key_alias", None
)
data["metadata"]["global_max_parallel_requests"] = general_settings.get(
"global_max_parallel_requests", None
)
data["metadata"]["user_api_key_user_id"] = user_api_key_dict.user_id
data["metadata"]["user_api_key_team_id"] = getattr(
user_api_key_dict, "team_id", None
@ -4518,6 +4553,9 @@ async def audio_transcriptions(
data["metadata"]["user_api_key_team_id"] = getattr(
user_api_key_dict, "team_id", None
)
data["metadata"]["global_max_parallel_requests"] = general_settings.get(
"global_max_parallel_requests", None
)
data["metadata"]["user_api_key_team_alias"] = getattr(
user_api_key_dict, "team_alias", None
)
@ -4715,6 +4753,9 @@ async def moderations(
"authorization", None
) # do not store the original `sk-..` api key in the db
data["metadata"]["headers"] = _headers
data["metadata"]["global_max_parallel_requests"] = general_settings.get(
"global_max_parallel_requests", None
)
data["metadata"]["user_api_key_alias"] = getattr(
user_api_key_dict, "key_alias", None
)
@ -9405,7 +9446,7 @@ async def auth_callback(request: Request):
return RedirectResponse(url=litellm_dashboard_ui)
#### BASIC ENDPOINTS ####
#### CONFIG MANAGEMENT ####
@router.post(
"/config/update",
tags=["config.yaml"],
@ -9541,6 +9582,299 @@ async def update_config(config_info: ConfigYAML):
)
### CONFIG GENERAL SETTINGS
"""
- Update config settings
- Get config settings
Keep these endpoints narrowly scoped to single fields, to prevent unintentionally overwriting other values
"""
@router.post(
    "/config/field/update",
    tags=["config.yaml"],
    dependencies=[Depends(user_api_key_auth)],
)
async def update_config_general_settings(
    data: ConfigFieldUpdate,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Update a specific field in litellm general settings.

    Admin-only. Validates the field name and value type against
    `ConfigGeneralSettings`, merges the new value into the stored
    `general_settings` blob, and upserts it in the `litellm_config` table.

    Raises:
        HTTPException(400): DB not connected, caller is not a proxy admin,
            unknown field name, or the value fails type validation.
    """
    global prisma_client
    ## VALIDATION ##
    """
    - Check if prisma_client is None
    - Check if user allowed to call this endpoint (admin-only)
    - Check if param in general settings
    - Check if config value is valid type
    """
    if prisma_client is None:
        raise HTTPException(
            status_code=400,
            detail={"error": CommonProxyErrors.db_not_connected_error.value},
        )

    if user_api_key_dict.user_role != "proxy_admin":
        raise HTTPException(
            status_code=400,
            detail={"error": CommonProxyErrors.not_allowed_access.value},
        )

    if data.field_name not in ConfigGeneralSettings.model_fields:
        raise HTTPException(
            status_code=400,
            detail={"error": "Invalid field={} passed in.".format(data.field_name)},
        )

    # Type-check the value by round-tripping it through the pydantic model.
    # Catch Exception (not bare except) so KeyboardInterrupt/SystemExit propagate.
    try:
        ConfigGeneralSettings(**{data.field_name: data.field_value})
    except Exception:
        raise HTTPException(
            status_code=400,
            detail={
                "error": "Invalid type of field value={} passed in.".format(
                    type(data.field_value),
                )
            },
        )

    ## get general settings from db
    db_general_settings = await prisma_client.db.litellm_config.find_first(
        where={"param_name": "general_settings"}
    )
    ### update value
    if db_general_settings is None or db_general_settings.param_value is None:
        general_settings = {}
    else:
        general_settings = dict(db_general_settings.param_value)

    ## update db
    general_settings[data.field_name] = data.field_value
    response = await prisma_client.db.litellm_config.upsert(
        where={"param_name": "general_settings"},
        data={
            "create": {"param_name": "general_settings", "param_value": json.dumps(general_settings)},  # type: ignore
            "update": {"param_value": json.dumps(general_settings)},  # type: ignore
        },
    )

    return response
@router.get(
    "/config/field/info",
    tags=["config.yaml"],
    dependencies=[Depends(user_api_key_auth)],
)
async def get_config_general_settings(
    field_name: str,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Return the DB-stored value of a single litellm general settings field.

    Admin-only. Raises HTTPException(400) when the DB is not connected,
    the caller is not a proxy admin, the field name is unknown, or no
    value for the field is stored in the DB.
    """
    global prisma_client

    ## VALIDATION ##
    """
    - Check if prisma_client is None
    - Check if user allowed to call this endpoint (admin-only)
    - Check if param in general settings
    """
    if prisma_client is None:
        raise HTTPException(
            status_code=400,
            detail={"error": CommonProxyErrors.db_not_connected_error.value},
        )

    if user_api_key_dict.user_role != "proxy_admin":
        raise HTTPException(
            status_code=400,
            detail={"error": CommonProxyErrors.not_allowed_access.value},
        )

    if field_name not in ConfigGeneralSettings.model_fields:
        raise HTTPException(
            status_code=400,
            detail={"error": "Invalid field={} passed in.".format(field_name)},
        )

    ## load the stored general_settings blob from the db
    db_general_settings = await prisma_client.db.litellm_config.find_first(
        where={"param_name": "general_settings"}
    )

    stored_settings = None
    if db_general_settings is not None and db_general_settings.param_value is not None:
        stored_settings = dict(db_general_settings.param_value)

    # Missing row and missing field are reported identically to the caller.
    if stored_settings is not None and field_name in stored_settings:
        return {
            "field_name": field_name,
            "field_value": stored_settings[field_name],
        }

    raise HTTPException(
        status_code=400,
        detail={"error": "Field name={} not in DB".format(field_name)},
    )
@router.get(
    "/config/list",
    tags=["config.yaml"],
    dependencies=[Depends(user_api_key_auth)],
)
async def get_config_list(
    config_type: Literal["general_settings"],
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
) -> List[ConfigList]:
    """
    List the available fields + current values for a given type of setting
    (currently just 'general_settings').

    Admin-only. For each exposed field, reports its type, description, the
    current in-memory value, and whether that value is persisted in the DB.
    """
    global prisma_client, general_settings
    ## VALIDATION ##
    """
    - Check if prisma_client is None
    - Check if user allowed to call this endpoint (admin-only)
    - Check if param in general settings
    """
    if prisma_client is None:
        raise HTTPException(
            status_code=400,
            detail={"error": CommonProxyErrors.db_not_connected_error.value},
        )

    if user_api_key_dict.user_role != "proxy_admin":
        raise HTTPException(
            status_code=400,
            detail={
                "error": "{}, your role={}".format(
                    CommonProxyErrors.not_allowed_access.value,
                    user_api_key_dict.user_role,
                )
            },
        )

    ## get general settings from db
    db_general_settings = await prisma_client.db.litellm_config.find_first(
        where={"param_name": "general_settings"}
    )

    if db_general_settings is not None and db_general_settings.param_value is not None:
        db_general_settings_dict = dict(db_general_settings.param_value)
    else:
        db_general_settings_dict = {}

    # Only these fields are exposed through the UI for now.
    allowed_args = {
        "max_parallel_requests": {"type": "Integer"},
        "global_max_parallel_requests": {"type": "Integer"},
    }

    return_val = []

    for field_name, field_info in ConfigGeneralSettings.model_fields.items():
        if field_name in allowed_args:
            # stored_in_db: True = persisted in DB, False = set via config.yaml
            # only, None = not set anywhere.
            _stored_in_db = None
            if field_name in db_general_settings_dict:
                _stored_in_db = True
            elif field_name in general_settings:
                _stored_in_db = False

            _response_obj = ConfigList(
                field_name=field_name,
                field_type=allowed_args[field_name]["type"],
                field_description=field_info.description or "",
                field_value=general_settings.get(field_name, None),
                stored_in_db=_stored_in_db,
            )
            return_val.append(_response_obj)

    return return_val
@router.post(
    "/config/field/delete",
    tags=["config.yaml"],
    dependencies=[Depends(user_api_key_auth)],
)
async def delete_config_general_settings(
    data: ConfigFieldDelete,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    """
    Delete the db value of this field in litellm general settings. Resets it
    to its initial default value on litellm.

    Admin-only. Raises HTTPException(400) when the DB is not connected, the
    caller is not a proxy admin, the field name is unknown, or there is no
    stored general_settings blob to delete from.
    """
    global prisma_client
    ## VALIDATION ##
    """
    - Check if prisma_client is None
    - Check if user allowed to call this endpoint (admin-only)
    - Check if param in general settings
    """
    if prisma_client is None:
        raise HTTPException(
            status_code=400,
            detail={"error": CommonProxyErrors.db_not_connected_error.value},
        )

    if user_api_key_dict.user_role != "proxy_admin":
        raise HTTPException(
            status_code=400,
            detail={
                "error": "{}, your role={}".format(
                    CommonProxyErrors.not_allowed_access.value,
                    user_api_key_dict.user_role,
                )
            },
        )

    if data.field_name not in ConfigGeneralSettings.model_fields:
        raise HTTPException(
            status_code=400,
            detail={"error": "Invalid field={} passed in.".format(data.field_name)},
        )

    ## get general settings from db
    db_general_settings = await prisma_client.db.litellm_config.find_first(
        where={"param_name": "general_settings"}
    )
    ### nothing stored -> nothing to delete
    if db_general_settings is None or db_general_settings.param_value is None:
        raise HTTPException(
            status_code=400,
            detail={"error": "Field name={} not in config".format(data.field_name)},
        )
    else:
        general_settings = dict(db_general_settings.param_value)

    ## remove the field and write the blob back
    general_settings.pop(data.field_name, None)
    response = await prisma_client.db.litellm_config.upsert(
        where={"param_name": "general_settings"},
        data={
            "create": {"param_name": "general_settings", "param_value": json.dumps(general_settings)},  # type: ignore
            "update": {"param_value": json.dumps(general_settings)},  # type: ignore
        },
    )

    return response
@router.get(
"/get/config/callbacks",
tags=["config.yaml"],
@ -9708,6 +10042,7 @@ async def config_yaml_endpoint(config_info: ConfigYAML):
return {"hello": "world"}
#### BASIC ENDPOINTS ####
@router.get(
"/test",
tags=["health"],

View file

@ -28,6 +28,37 @@ from datetime import datetime
## On Request failure
@pytest.mark.asyncio
async def test_global_max_parallel_requests():
    """
    Test if ParallelRequestHandler respects 'global_max_parallel_requests'
    passed via data["metadata"]["global_max_parallel_requests"].

    With a global limit of 0, every pre-call hook invocation must raise.
    """
    global_max_parallel_requests = 0  # limit of 0 -> every request rejected
    _api_key = hash_token("sk-12345")  # was assigned twice; first value was dead
    user_api_key_dict = UserAPIKeyAuth(api_key=_api_key, max_parallel_requests=100)
    local_cache = DualCache()
    parallel_request_handler = MaxParallelRequestsHandler()

    for _ in range(3):
        try:
            await parallel_request_handler.async_pre_call_hook(
                user_api_key_dict=user_api_key_dict,
                cache=local_cache,
                data={
                    "metadata": {
                        "global_max_parallel_requests": global_max_parallel_requests
                    }
                },
                call_type="",
            )
            pytest.fail("Expected call to fail")
        except Exception as e:
            # Previously `pass`, which accepted ANY exception. Assert we got
            # the 429 the limiter raises, not an unrelated error.
            assert "Max parallel request limit reached" in str(e)
@pytest.mark.asyncio
async def test_pre_call_hook():
"""

View file

@ -2565,7 +2565,7 @@ class Logging:
response_obj=result,
start_time=start_time,
end_time=end_time,
)
) # type: ignore
if callable(callback): # custom logger functions
await customLogger.async_log_event(
kwargs=self.model_call_details,

View file

@ -23,12 +23,44 @@ import {
AccordionHeader,
AccordionList,
} from "@tremor/react";
import { TabPanel, TabPanels, TabGroup, TabList, Tab, Icon } from "@tremor/react";
import { getCallbacksCall, setCallbacksCall, serviceHealthCheck } from "./networking";
import { Modal, Form, Input, Select, Button as Button2, message } from "antd";
import { InformationCircleIcon, PencilAltIcon, PencilIcon, StatusOnlineIcon, TrashIcon, RefreshIcon } from "@heroicons/react/outline";
import {
TabPanel,
TabPanels,
TabGroup,
TabList,
Tab,
Icon,
} from "@tremor/react";
import {
getCallbacksCall,
setCallbacksCall,
getGeneralSettingsCall,
serviceHealthCheck,
updateConfigFieldSetting,
deleteConfigFieldSetting,
} from "./networking";
import {
Modal,
Form,
Input,
Select,
Button as Button2,
message,
InputNumber,
} from "antd";
import {
InformationCircleIcon,
PencilAltIcon,
PencilIcon,
StatusOnlineIcon,
TrashIcon,
RefreshIcon,
CheckCircleIcon,
XCircleIcon,
QuestionMarkCircleIcon,
} from "@heroicons/react/outline";
import StaticGenerationSearchParamsBailoutProvider from "next/dist/client/components/static-generation-searchparams-bailout-provider";
import AddFallbacks from "./add_fallbacks"
import AddFallbacks from "./add_fallbacks";
import openai from "openai";
import Paragraph from "antd/es/skeleton/Paragraph";
@ -36,7 +68,7 @@ interface GeneralSettingsPageProps {
accessToken: string | null;
userRole: string | null;
userID: string | null;
modelData: any
modelData: any;
}
async function testFallbackModelResponse(
@ -65,24 +97,39 @@ async function testFallbackModelResponse(
},
],
// @ts-ignore
mock_testing_fallbacks: true
mock_testing_fallbacks: true,
});
message.success(
<span>
Test model=<strong>{selectedModel}</strong>, received model=<strong>{response.model}</strong>.
See <a href="#" onClick={() => window.open('https://docs.litellm.ai/docs/proxy/reliability', '_blank')} style={{ textDecoration: 'underline', color: 'blue' }}>curl</a>
Test model=<strong>{selectedModel}</strong>, received model=
<strong>{response.model}</strong>. See{" "}
<a
href="#"
onClick={() =>
window.open(
"https://docs.litellm.ai/docs/proxy/reliability",
"_blank"
)
}
style={{ textDecoration: "underline", color: "blue" }}
>
curl
</a>
</span>
);
} catch (error) {
message.error(`Error occurred while generating model response. Please try again. Error: ${error}`, 20);
message.error(
`Error occurred while generating model response. Please try again. Error: ${error}`,
20
);
}
}
interface AccordionHeroProps {
selectedStrategy: string | null;
strategyArgs: routingStrategyArgs;
paramExplanation: { [key: string]: string }
paramExplanation: { [key: string]: string };
}
interface routingStrategyArgs {
@ -90,17 +137,30 @@ interface routingStrategyArgs {
lowest_latency_buffer?: number;
}
const defaultLowestLatencyArgs: routingStrategyArgs = {
"ttl": 3600,
"lowest_latency_buffer": 0
interface generalSettingsItem {
field_name: string;
field_type: string;
field_value: any;
field_description: string;
stored_in_db: boolean | null;
}
export const AccordionHero: React.FC<AccordionHeroProps> = ({ selectedStrategy, strategyArgs, paramExplanation }) => (
const defaultLowestLatencyArgs: routingStrategyArgs = {
ttl: 3600,
lowest_latency_buffer: 0,
};
export const AccordionHero: React.FC<AccordionHeroProps> = ({
selectedStrategy,
strategyArgs,
paramExplanation,
}) => (
<Accordion>
<AccordionHeader className="text-sm font-medium text-tremor-content-strong dark:text-dark-tremor-content-strong">Routing Strategy Specific Args</AccordionHeader>
<AccordionHeader className="text-sm font-medium text-tremor-content-strong dark:text-dark-tremor-content-strong">
Routing Strategy Specific Args
</AccordionHeader>
<AccordionBody>
{
selectedStrategy == "latency-based-routing" ?
{selectedStrategy == "latency-based-routing" ? (
<Card>
<Table>
<TableHead>
@ -114,13 +174,24 @@ export const AccordionHero: React.FC<AccordionHeroProps> = ({ selectedStrategy,
<TableRow key={param}>
<TableCell>
<Text>{param}</Text>
<p style={{fontSize: '0.65rem', color: '#808080', fontStyle: 'italic'}} className="mt-1">{paramExplanation[param]}</p>
<p
style={{
fontSize: "0.65rem",
color: "#808080",
fontStyle: "italic",
}}
className="mt-1"
>
{paramExplanation[param]}
</p>
</TableCell>
<TableCell>
<TextInput
name={param}
defaultValue={
typeof value === 'object' ? JSON.stringify(value, null, 2) : value.toString()
typeof value === "object"
? JSON.stringify(value, null, 2)
: value.toString()
}
/>
</TableCell>
@ -129,8 +200,9 @@ export const AccordionHero: React.FC<AccordionHeroProps> = ({ selectedStrategy,
</TableBody>
</Table>
</Card>
: <Text>No specific settings</Text>
}
) : (
<Text>No specific settings</Text>
)}
</AccordionBody>
</Accordion>
);
@ -139,26 +211,38 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
accessToken,
userRole,
userID,
modelData
modelData,
}) => {
const [routerSettings, setRouterSettings] = useState<{ [key: string]: any }>({});
const [routerSettings, setRouterSettings] = useState<{ [key: string]: any }>(
{}
);
const [generalSettingsDict, setGeneralSettingsDict] = useState<{
[key: string]: any;
}>({});
const [generalSettings, setGeneralSettings] = useState<generalSettingsItem[]>(
[]
);
const [isModalVisible, setIsModalVisible] = useState(false);
const [form] = Form.useForm();
const [selectedCallback, setSelectedCallback] = useState<string | null>(null);
const [selectedStrategy, setSelectedStrategy] = useState<string | null>(null)
const [strategySettings, setStrategySettings] = useState<routingStrategyArgs | null>(null);
const [selectedStrategy, setSelectedStrategy] = useState<string | null>(null);
const [strategySettings, setStrategySettings] =
useState<routingStrategyArgs | null>(null);
let paramExplanation: { [key: string]: string } = {
"routing_strategy_args": "(dict) Arguments to pass to the routing strategy",
"routing_strategy": "(string) Routing strategy to use",
"allowed_fails": "(int) Number of times a deployment can fail before being added to cooldown",
"cooldown_time": "(int) time in seconds to cooldown a deployment after failure",
"num_retries": "(int) Number of retries for failed requests. Defaults to 0.",
"timeout": "(float) Timeout for requests. Defaults to None.",
"retry_after": "(int) Minimum time to wait before retrying a failed request",
"ttl": "(int) Sliding window to look back over when calculating the average latency of a deployment. Default - 1 hour (in seconds).",
"lowest_latency_buffer": "(float) Shuffle between deployments within this % of the lowest latency. Default - 0 (i.e. always pick lowest latency)."
}
routing_strategy_args: "(dict) Arguments to pass to the routing strategy",
routing_strategy: "(string) Routing strategy to use",
allowed_fails:
"(int) Number of times a deployment can fail before being added to cooldown",
cooldown_time:
"(int) time in seconds to cooldown a deployment after failure",
num_retries: "(int) Number of retries for failed requests. Defaults to 0.",
timeout: "(float) Timeout for requests. Defaults to None.",
retry_after: "(int) Minimum time to wait before retrying a failed request",
ttl: "(int) Sliding window to look back over when calculating the average latency of a deployment. Default - 1 hour (in seconds).",
lowest_latency_buffer:
"(float) Shuffle between deployments within this % of the lowest latency. Default - 0 (i.e. always pick lowest latency).",
};
useEffect(() => {
if (!accessToken || !userRole || !userID) {
@ -169,6 +253,10 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
let router_settings = data.router_settings;
setRouterSettings(router_settings);
});
getGeneralSettingsCall(accessToken).then((data) => {
let general_settings = data;
setGeneralSettings(general_settings);
});
}, [accessToken, userRole, userID]);
const handleAddCallback = () => {
@ -190,8 +278,8 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
return;
}
console.log(`received key: ${key}`)
console.log(`routerSettings['fallbacks']: ${routerSettings['fallbacks']}`)
console.log(`received key: ${key}`);
console.log(`routerSettings['fallbacks']: ${routerSettings["fallbacks"]}`);
routerSettings["fallbacks"].map((dict: { [key: string]: any }) => {
// Check if the dictionary has the specified key and delete it if present
@ -202,19 +290,74 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
});
const payload = {
router_settings: routerSettings
router_settings: routerSettings,
};
try {
await setCallbacksCall(accessToken, payload);
setRouterSettings({ ...routerSettings });
setSelectedStrategy(routerSettings["routing_strategy"])
setSelectedStrategy(routerSettings["routing_strategy"]);
message.success("Router settings updated successfully");
} catch (error) {
message.error("Failed to update router settings: " + error, 20);
}
};
// Controlled-input handler: copy the settings list, replacing only the
// edited field's value, and push the new list into component state.
const handleInputChange = (fieldName: string, newValue: any) => {
  // Update the value in the state
  const updatedSettings = generalSettings.map((setting) =>
    setting.field_name === fieldName
      ? { ...setting, field_value: newValue }
      : setting
  );
  setGeneralSettings(updatedSettings);
};
// Persist one general-settings field to the proxy, then mark it as DB-backed
// in local state.
const handleUpdateField = async (fieldName: string, idx: number) => {
  if (!accessToken) {
    return;
  }

  const fieldValue = generalSettings[idx].field_value;

  // `== null` covers both null and undefined
  if (fieldValue == null) {
    return;
  }

  try {
    // BUG FIX: the promise was not awaited, so a rejected request never
    // reached the catch block and stored_in_db was set optimistically.
    await updateConfigFieldSetting(accessToken, fieldName, fieldValue);
    // update value in state
    const updatedSettings = generalSettings.map((setting) =>
      setting.field_name === fieldName
        ? { ...setting, stored_in_db: true }
        : setting
    );
    setGeneralSettings(updatedSettings);
  } catch (error) {
    // TODO(review): surface the failure to the user (e.g. message.error)
  }
};
// Delete one general-settings field's DB value on the proxy, then clear it
// in local state so the UI shows the litellm default again.
const handleResetField = async (fieldName: string, idx: number) => {
  if (!accessToken) {
    return;
  }

  try {
    // BUG FIX: the promise was not awaited, so a rejected request never
    // reached the catch block and the field was cleared optimistically.
    await deleteConfigFieldSetting(accessToken, fieldName);
    // update value in state
    const updatedSettings = generalSettings.map((setting) =>
      setting.field_name === fieldName
        ? { ...setting, stored_in_db: null, field_value: null }
        : setting
    );
    setGeneralSettings(updatedSettings);
  } catch (error) {
    // TODO(review): surface the failure to the user (e.g. message.error)
  }
};
const handleSaveChanges = (router_settings: any) => {
if (!accessToken) {
return;
@ -223,39 +366,55 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
console.log("router_settings", router_settings);
const updatedVariables = Object.fromEntries(
Object.entries(router_settings).map(([key, value]) => {
if (key !== 'routing_strategy_args' && key !== "routing_strategy") {
return [key, (document.querySelector(`input[name="${key}"]`) as HTMLInputElement)?.value || value];
}
else if (key == "routing_strategy") {
return [key, selectedStrategy]
}
else if (key == "routing_strategy_args" && selectedStrategy == "latency-based-routing") {
let setRoutingStrategyArgs: routingStrategyArgs = {}
Object.entries(router_settings)
.map(([key, value]) => {
if (key !== "routing_strategy_args" && key !== "routing_strategy") {
return [
key,
(
document.querySelector(
`input[name="${key}"]`
) as HTMLInputElement
)?.value || value,
];
} else if (key == "routing_strategy") {
return [key, selectedStrategy];
} else if (
key == "routing_strategy_args" &&
selectedStrategy == "latency-based-routing"
) {
let setRoutingStrategyArgs: routingStrategyArgs = {};
const lowestLatencyBufferElement = document.querySelector(`input[name="lowest_latency_buffer"]`) as HTMLInputElement;
const ttlElement = document.querySelector(`input[name="ttl"]`) as HTMLInputElement;
const lowestLatencyBufferElement = document.querySelector(
`input[name="lowest_latency_buffer"]`
) as HTMLInputElement;
const ttlElement = document.querySelector(
`input[name="ttl"]`
) as HTMLInputElement;
if (lowestLatencyBufferElement?.value) {
setRoutingStrategyArgs["lowest_latency_buffer"] = Number(lowestLatencyBufferElement.value)
setRoutingStrategyArgs["lowest_latency_buffer"] = Number(
lowestLatencyBufferElement.value
);
}
if (ttlElement?.value) {
setRoutingStrategyArgs["ttl"] = Number(ttlElement.value)
setRoutingStrategyArgs["ttl"] = Number(ttlElement.value);
}
console.log(`setRoutingStrategyArgs: ${setRoutingStrategyArgs}`)
return [
"routing_strategy_args", setRoutingStrategyArgs
]
console.log(`setRoutingStrategyArgs: ${setRoutingStrategyArgs}`);
return ["routing_strategy_args", setRoutingStrategyArgs];
}
return null;
}).filter(entry => entry !== null && entry !== undefined) as Iterable<[string, unknown]>
})
.filter((entry) => entry !== null && entry !== undefined) as Iterable<
[string, unknown]
>
);
console.log("updatedVariables", updatedVariables);
const payload = {
router_settings: updatedVariables
router_settings: updatedVariables,
};
try {
@ -267,25 +426,23 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
message.success("router settings updated successfully");
};
if (!accessToken) {
return null;
}
return (
<div className="w-full mx-4">
<TabGroup className="gap-2 p-8 h-[75vh] w-full mt-2">
<TabList variant="line" defaultValue="1">
<Tab value="1">General Settings</Tab>
<Tab value="1">Loadbalancing</Tab>
<Tab value="2">Fallbacks</Tab>
<Tab value="3">General</Tab>
</TabList>
<TabPanels>
<TabPanel>
<Grid numItems={1} className="gap-2 p-8 w-full mt-2">
<Title>Router Settings</Title>
<Card >
<Card>
<Table>
<TableHead>
<TableRow>
@ -294,27 +451,55 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
</TableRow>
</TableHead>
<TableBody>
{Object.entries(routerSettings).filter(([param, value]) => param != "fallbacks" && param != "context_window_fallbacks" && param != "routing_strategy_args").map(([param, value]) => (
{Object.entries(routerSettings)
.filter(
([param, value]) =>
param != "fallbacks" &&
param != "context_window_fallbacks" &&
param != "routing_strategy_args"
)
.map(([param, value]) => (
<TableRow key={param}>
<TableCell>
<Text>{param}</Text>
<p style={{fontSize: '0.65rem', color: '#808080', fontStyle: 'italic'}} className="mt-1">{paramExplanation[param]}</p>
<p
style={{
fontSize: "0.65rem",
color: "#808080",
fontStyle: "italic",
}}
className="mt-1"
>
{paramExplanation[param]}
</p>
</TableCell>
<TableCell>
{
param == "routing_strategy" ?
<Select2 defaultValue={value} className="w-full max-w-md" onValueChange={setSelectedStrategy}>
<SelectItem value="usage-based-routing">usage-based-routing</SelectItem>
<SelectItem value="latency-based-routing">latency-based-routing</SelectItem>
<SelectItem value="simple-shuffle">simple-shuffle</SelectItem>
</Select2> :
{param == "routing_strategy" ? (
<Select2
defaultValue={value}
className="w-full max-w-md"
onValueChange={setSelectedStrategy}
>
<SelectItem value="usage-based-routing">
usage-based-routing
</SelectItem>
<SelectItem value="latency-based-routing">
latency-based-routing
</SelectItem>
<SelectItem value="simple-shuffle">
simple-shuffle
</SelectItem>
</Select2>
) : (
<TextInput
name={param}
defaultValue={
typeof value === 'object' ? JSON.stringify(value, null, 2) : value.toString()
typeof value === "object"
? JSON.stringify(value, null, 2)
: value.toString()
}
/>
}
)}
</TableCell>
</TableRow>
))}
@ -323,15 +508,21 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
<AccordionHero
selectedStrategy={selectedStrategy}
strategyArgs={
routerSettings && routerSettings['routing_strategy_args'] && Object.keys(routerSettings['routing_strategy_args']).length > 0
? routerSettings['routing_strategy_args']
routerSettings &&
routerSettings["routing_strategy_args"] &&
Object.keys(routerSettings["routing_strategy_args"])
.length > 0
? routerSettings["routing_strategy_args"]
: defaultLowestLatencyArgs // default value when keys length is 0
}
paramExplanation={paramExplanation}
/>
</Card>
<Col>
<Button className="mt-2" onClick={() => handleSaveChanges(routerSettings)}>
<Button
className="mt-2"
onClick={() => handleSaveChanges(routerSettings)}
>
Save Changes
</Button>
</Col>
@ -347,15 +538,21 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
</TableHead>
<TableBody>
{
routerSettings["fallbacks"] &&
routerSettings["fallbacks"].map((item: Object, index: number) =>
{routerSettings["fallbacks"] &&
routerSettings["fallbacks"].map(
(item: Object, index: number) =>
Object.entries(item).map(([key, value]) => (
<TableRow key={index.toString() + key}>
<TableCell>{key}</TableCell>
<TableCell>{Array.isArray(value) ? value.join(', ') : value}</TableCell>
<TableCell>
<Button onClick={() => testFallbackModelResponse(key, accessToken)}>
{Array.isArray(value) ? value.join(", ") : value}
</TableCell>
<TableCell>
<Button
onClick={() =>
testFallbackModelResponse(key, accessToken)
}
>
Test Fallback
</Button>
</TableCell>
@ -368,11 +565,96 @@ const GeneralSettings: React.FC<GeneralSettingsPageProps> = ({
</TableCell>
</TableRow>
))
)
}
)}
</TableBody>
</Table>
<AddFallbacks models={modelData?.data ? modelData.data.map((data: any) => data.model_name) : []} accessToken={accessToken} routerSettings={routerSettings} setRouterSettings={setRouterSettings}/>
<AddFallbacks
models={
modelData?.data
? modelData.data.map((data: any) => data.model_name)
: []
}
accessToken={accessToken}
routerSettings={routerSettings}
setRouterSettings={setRouterSettings}
/>
</TabPanel>
<TabPanel>
<Card>
<Table>
<TableHead>
<TableRow>
<TableHeaderCell>Setting</TableHeaderCell>
<TableHeaderCell>Value</TableHeaderCell>
<TableHeaderCell>Status</TableHeaderCell>
<TableHeaderCell>Action</TableHeaderCell>
</TableRow>
</TableHead>
<TableBody>
{generalSettings.map((value, index) => (
<TableRow key={index}>
<TableCell>
<Text>{value.field_name}</Text>
<p
style={{
fontSize: "0.65rem",
color: "#808080",
fontStyle: "italic",
}}
className="mt-1"
>
{value.field_description}
</p>
</TableCell>
<TableCell>
{value.field_type == "Integer" ? (
<InputNumber
step={1}
value={value.field_value}
onChange={(newValue) =>
handleInputChange(value.field_name, newValue)
} // Handle value change
/>
) : null}
</TableCell>
<TableCell>
{value.stored_in_db == true ? (
<Badge icon={CheckCircleIcon} className="text-white">
In DB
</Badge>
) : value.stored_in_db == false ? (
<Badge className="text-gray bg-white outline">
In Config
</Badge>
) : (
<Badge className="text-gray bg-white outline">
Not Set
</Badge>
)}
</TableCell>
<TableCell>
<Button
onClick={() =>
handleUpdateField(value.field_name, index)
}
>
Update
</Button>
<Icon
icon={TrashIcon}
color="red"
onClick={() =>
handleResetField(value.field_name, index)
}
>
Reset
</Icon>
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</Card>
</TabPanel>
</TabPanels>
</TabGroup>

View file

@ -14,15 +14,17 @@ export interface Model {
export const modelCostMap = async () => {
try {
const response = await fetch('https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json');
const response = await fetch(
"https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
);
const jsonData = await response.json();
console.log(`received data: ${jsonData}`)
return jsonData
console.log(`received data: ${jsonData}`);
return jsonData;
} catch (error) {
console.error("Failed to get model cost map:", error);
throw error;
}
}
};
export const modelCreateCall = async (
accessToken: string,
@ -50,19 +52,21 @@ export const modelCreateCall = async (
const data = await response.json();
console.log("API Response:", data);
message.success("Model created successfully. Wait 60s and refresh on 'All Models' page");
message.success(
"Model created successfully. Wait 60s and refresh on 'All Models' page"
);
return data;
} catch (error) {
console.error("Failed to create key:", error);
throw error;
}
}
};
export const modelDeleteCall = async (
accessToken: string,
model_id: string,
model_id: string
) => {
console.log(`model_id in model delete call: ${model_id}`)
console.log(`model_id in model delete call: ${model_id}`);
try {
const url = proxyBaseUrl ? `${proxyBaseUrl}/model/delete` : `/model/delete`;
const response = await fetch(url, {
@ -72,7 +76,7 @@ export const modelDeleteCall = async (
"Content-Type": "application/json",
},
body: JSON.stringify({
"id": model_id,
id: model_id,
}),
});
@ -91,7 +95,7 @@ export const modelDeleteCall = async (
console.error("Failed to create key:", error);
throw error;
}
}
};
export const keyCreateCall = async (
accessToken: string,
@ -280,8 +284,7 @@ export const teamDeleteCall = async (accessToken: String, teamID: String) => {
console.error("Failed to delete key:", error);
throw error;
}
}
};
export const userInfoCall = async (
accessToken: String,
@ -300,7 +303,7 @@ export const userInfoCall = async (
url = `${url}?user_id=${userID}`;
}
console.log("in userInfoCall viewAll=", viewAll);
if (viewAll && page_size && (page != null) && (page != undefined)) {
if (viewAll && page_size && page != null && page != undefined) {
url = `${url}?view_all=true&page=${page}&page_size=${page_size}`;
}
//message.info("Requesting user data");
@ -329,10 +332,9 @@ export const userInfoCall = async (
}
};
export const teamInfoCall = async (
accessToken: String,
teamID: String | null,
teamID: String | null
) => {
try {
let url = proxyBaseUrl ? `${proxyBaseUrl}/team/info` : `/team/info`;
@ -364,10 +366,7 @@ export const teamInfoCall = async (
}
};
export const getTotalSpendCall = async (
accessToken: String,
) => {
export const getTotalSpendCall = async (accessToken: String) => {
/**
* Get all models on proxy
*/
@ -435,7 +434,6 @@ export const modelInfoCall = async (
}
};
export const modelMetricsCall = async (
accessToken: String,
userID: String,
@ -450,7 +448,7 @@ export const modelMetricsCall = async (
try {
let url = proxyBaseUrl ? `${proxyBaseUrl}/model/metrics` : `/model/metrics`;
if (modelGroup) {
url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`
url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`;
}
// message.info("Requesting model data");
const response = await fetch(url, {
@ -476,8 +474,6 @@ export const modelMetricsCall = async (
}
};
export const modelMetricsSlowResponsesCall = async (
accessToken: String,
userID: String,
@ -490,9 +486,11 @@ export const modelMetricsSlowResponsesCall = async (
* Get all models on proxy
*/
try {
let url = proxyBaseUrl ? `${proxyBaseUrl}/model/metrics/slow_responses` : `/model/metrics/slow_responses`;
let url = proxyBaseUrl
? `${proxyBaseUrl}/model/metrics/slow_responses`
: `/model/metrics/slow_responses`;
if (modelGroup) {
url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`
url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`;
}
// message.info("Requesting model data");
@ -519,7 +517,6 @@ export const modelMetricsSlowResponsesCall = async (
}
};
export const modelExceptionsCall = async (
accessToken: String,
userID: String,
@ -532,10 +529,12 @@ export const modelExceptionsCall = async (
* Get all models on proxy
*/
try {
let url = proxyBaseUrl ? `${proxyBaseUrl}/model/metrics/exceptions` : `/model/metrics/exceptions`;
let url = proxyBaseUrl
? `${proxyBaseUrl}/model/metrics/exceptions`
: `/model/metrics/exceptions`;
if (modelGroup) {
url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`
url = `${url}?_selected_model_group=${modelGroup}&startTime=${startTime}&endTime=${endTime}`;
}
const response = await fetch(url, {
method: "GET",
@ -560,7 +559,6 @@ export const modelExceptionsCall = async (
}
};
export const modelAvailableCall = async (
accessToken: String,
userID: String,
@ -625,7 +623,6 @@ export const keySpendLogsCall = async (accessToken: String, token: String) => {
}
};
export const teamSpendLogsCall = async (accessToken: String) => {
try {
const url = proxyBaseUrl
@ -654,19 +651,18 @@ export const teamSpendLogsCall = async (accessToken: String) => {
}
};
export const tagsSpendLogsCall = async (
accessToken: String,
startTime: String | undefined,
endTime: String | undefined
) => {
) => {
try {
let url = proxyBaseUrl
? `${proxyBaseUrl}/global/spend/tags`
: `/global/spend/tags`;
if (startTime && endTime) {
url = `${url}?start_date=${startTime}&end_date=${endTime}`
url = `${url}?start_date=${startTime}&end_date=${endTime}`;
}
console.log("in tagsSpendLogsCall:", url);
@ -692,7 +688,6 @@ export const tagsSpendLogsCall = async (
}
};
export const userSpendLogsCall = async (
accessToken: String,
token: String,
@ -806,7 +801,11 @@ export const adminTopEndUsersCall = async (
let body = "";
if (keyToken) {
body = JSON.stringify({ api_key: keyToken, startTime: startTime, endTime: endTime });
body = JSON.stringify({
api_key: keyToken,
startTime: startTime,
endTime: endTime,
});
} else {
body = JSON.stringify({ startTime: startTime, endTime: endTime });
}
@ -1079,7 +1078,6 @@ export const teamCreateCall = async (
}
};
export const keyUpdateCall = async (
accessToken: string,
formValues: Record<string, any> // Assuming formValues is an object
@ -1240,7 +1238,7 @@ export const userUpdateUserCall = async (
console.log("Form Values in userUpdateUserCall:", formValues); // Log the form values before making the API call
const url = proxyBaseUrl ? `${proxyBaseUrl}/user/update` : `/user/update`;
let response_body = {...formValues};
let response_body = { ...formValues };
if (userRole !== null) {
response_body["user_role"] = userRole;
}
@ -1347,9 +1345,10 @@ export const slackBudgetAlertsHealthCheck = async (accessToken: String) => {
}
};
export const serviceHealthCheck= async (accessToken: String, service: String) => {
export const serviceHealthCheck = async (
accessToken: String,
service: String
) => {
try {
let url = proxyBaseUrl
? `${proxyBaseUrl}/health/services?service=${service}`
@ -1373,7 +1372,9 @@ export const serviceHealthCheck= async (accessToken: String, service: String) =>
}
const data = await response.json();
message.success(`Test request to ${service} made - check logs/alerts on ${service} to verify`);
message.success(
`Test request to ${service} made - check logs/alerts on ${service} to verify`
);
// You can add additional logic here based on the response if needed
return data;
} catch (error) {
@ -1382,9 +1383,6 @@ export const serviceHealthCheck= async (accessToken: String, service: String) =>
}
};
export const getCallbacksCall = async (
accessToken: String,
userID: String,
@ -1394,7 +1392,9 @@ export const getCallbacksCall = async (
* Get all the models user has access to
*/
try {
let url = proxyBaseUrl ? `${proxyBaseUrl}/get/config/callbacks` : `/get/config/callbacks`;
let url = proxyBaseUrl
? `${proxyBaseUrl}/get/config/callbacks`
: `/get/config/callbacks`;
//message.info("Requesting model data");
const response = await fetch(url, {
@ -1421,11 +1421,117 @@ export const getCallbacksCall = async (
}
};
/**
 * Fetch the proxy's `general_settings` field list from
 * `GET /config/list?config_type=general_settings`.
 *
 * Shows an error toast and throws when the response is not OK.
 */
export const getGeneralSettingsCall = async (accessToken: String) => {
  try {
    const url = proxyBaseUrl
      ? `${proxyBaseUrl}/config/list?config_type=general_settings`
      : `/config/list?config_type=general_settings`;

    const response = await fetch(url, {
      method: "GET",
      headers: {
        Authorization: `Bearer ${accessToken}`,
        "Content-Type": "application/json",
      },
    });

    if (!response.ok) {
      const errorData = await response.text();
      message.error(errorData, 10);
      throw new Error("Network response was not ok");
    }

    return await response.json();
  } catch (error) {
    // Bug fix: original logged "Failed to get callbacks" (copy-paste from
    // getCallbacksCall); this endpoint returns general settings.
    console.error("Failed to get general settings:", error);
    throw error;
  }
};
/**
 * Update a single `general_settings` field on the proxy via
 * `POST /config/field/update`.
 *
 * Shows a success toast on success; shows an error toast and throws on
 * a non-OK response.
 */
export const updateConfigFieldSetting = async (
  accessToken: String,
  fieldName: string,
  fieldValue: any
) => {
  try {
    const url = proxyBaseUrl
      ? `${proxyBaseUrl}/config/field/update`
      : `/config/field/update`;

    // Shape matches the proxy's ConfigFieldUpdate model.
    const formData = {
      field_name: fieldName,
      field_value: fieldValue,
      config_type: "general_settings",
    };

    const response = await fetch(url, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${accessToken}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify(formData),
    });

    if (!response.ok) {
      const errorData = await response.text();
      message.error(errorData, 10);
      throw new Error("Network response was not ok");
    }

    const data = await response.json();
    message.success("Successfully updated value!");
    return data;
  } catch (error) {
    // Bug fix: original logged "Failed to set callbacks" (copy-paste from
    // setCallbacksCall); this endpoint updates a config field.
    console.error("Failed to update config field setting:", error);
    throw error;
  }
};
/**
 * Remove a `general_settings` field override from the proxy DB via
 * `POST /config/field/delete`, reverting it to the config.yaml / default
 * value.
 *
 * Shows a success toast on success; shows an error toast and throws on
 * a non-OK response.
 */
export const deleteConfigFieldSetting = async (
  accessToken: String,
  fieldName: String
) => {
  try {
    const url = proxyBaseUrl
      ? `${proxyBaseUrl}/config/field/delete`
      : `/config/field/delete`;

    // Shape matches the proxy's ConfigFieldDelete model.
    const formData = {
      field_name: fieldName,
      config_type: "general_settings",
    };

    const response = await fetch(url, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${accessToken}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify(formData),
    });

    if (!response.ok) {
      const errorData = await response.text();
      message.error(errorData, 10);
      throw new Error("Network response was not ok");
    }

    const data = await response.json();
    message.success("Field reset on proxy");
    return data;
  } catch (error) {
    // Bug fix: original logged "Failed to get callbacks" (copy-paste from
    // getCallbacksCall); this endpoint deletes a config field override.
    console.error("Failed to delete config field setting:", error);
    throw error;
  }
};
export const setCallbacksCall = async (
accessToken: String,
formValues: Record<string, any>
@ -1464,9 +1570,7 @@ export const setCallbacksCall = async (
}
};
export const healthCheckCall = async (
accessToken: String,
) => {
export const healthCheckCall = async (accessToken: String) => {
/**
* Get all the models user has access to
*/
@ -1497,6 +1601,3 @@ export const healthCheckCall = async (
throw error;
}
};