Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
Litellm perf improvements 3 (#6573)
* perf: move writing the key to cache into a background task (sketched below)
* perf(litellm_pre_call_utils.py): add otel tracing for pre-call utils (adds 200ms on calls with pgdb connected)
* fix(litellm_pre_call_utils.py): rename call_type to the actual call used
* perf(proxy_server.py): remove db logic from _get_config_from_file; it was causing db calls on every llm request if team_id was set on the key
* fix(auth_checks.py): add a check to reduce db calls when the user/team id does not exist in the db; reduces latency by ~100ms per call
* fix(proxy_server.py): minor fix, existing_settings was not including alerting
* fix(exception_mapping_utils.py): map the databricks exception string
* fix(auth_checks.py): fix auth check logic
* test: correctly mark flaky test
* fix(utils.py): handle auth token error for tokenizers.from_pretrained
This commit is contained in:
parent: 7525b6bbaa
commit: 3a6ba0b955

14 changed files with 137 additions and 86 deletions
utils.py

```diff
@@ -1539,9 +1539,15 @@ def create_pretrained_tokenizer(
         dict: A dictionary with the tokenizer and its type.
     """

-    tokenizer = Tokenizer.from_pretrained(
-        identifier, revision=revision, auth_token=auth_token
-    )
+    try:
+        tokenizer = Tokenizer.from_pretrained(
+            identifier, revision=revision, auth_token=auth_token
+        )
+    except Exception as e:
+        verbose_logger.error(
+            f"Error creating pretrained tokenizer: {e}. Defaulting to version without 'auth_token'."
+        )
+        tokenizer = Tokenizer.from_pretrained(identifier, revision=revision)
     return {"type": "huggingface_tokenizer", "tokenizer": tokenizer}
```