Mirror of https://github.com/BerriAI/litellm.git
LiteLLM Minor Fixes & Improvements (04/02/2025) (#9725)
* Add date picker to usage tab + add reasoning_content token tracking across all providers on streaming (#9722)
  * feat(new_usage.tsx): add date picker for the new usage tab, so users can look back on their usage data
  * feat(anthropic/chat/transformation.py): report reasoning tokens in completion token details, allowing tracking of how many reasoning tokens are actually being used
  * feat(streaming_chunk_builder.py): return reasoning_tokens in anthropic/openai streaming responses, allowing reasoning_token usage tracking across providers
* Fix update team metadata + fix bulk adding models on UI (#9721)
  * fix(handle_add_model_submit.tsx): fix bulk adding models
  * fix(team_info.tsx): fix team metadata update (fixes https://github.com/BerriAI/litellm/issues/9689)
* (v0) Unified file id - allow calling multiple providers with the same file id (#9718)
  * feat(files_endpoints.py): add 'target_model_names' support, letting a developer specify all the models they want to call with the file
  * feat(files_endpoints.py): return unified files endpoint
  * feat: initial working commit of unified file id translation, plus further updates and fixes
  * test(test_files_endpoints.py): add validation test for an invalid purpose being submitted
  * fix(router.py): remove model replace logic in jsonl on acreate_file; enables file upload to work for chat completion requests as well
  * fix(files_endpoints.py): remove whitespace around model name
  * fix(azure/handler.py): return acreate_file with the correct response type
  * fix(utils.py): remove redundant var
  * fix(model_connection_test.tsx): fix linting error
  * fix: fix linting, ruff, and file-too-large errors
  * test: update tests and mocks to run on GitHub Actions; add more debug logs to understand a CI/CD issue (including a respx test fix)
  * test: skip mock respx test (fails on CI/CD - not clear why)
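For the reasoning-token tracking in #9722, a minimal usage sketch may help. It assumes an Anthropic API key is configured, that the chosen model supports extended thinking, and that `reasoning_effort` is the switch enabling it in your setup; the usage field path follows the OpenAI-style `completion_tokens_details` this change populates on rebuilt streaming responses:

```python
import litellm

# Stream a reasoning-capable model; the model name and the
# `reasoning_effort` parameter are assumptions for illustration.
chunks = list(
    litellm.completion(
        model="anthropic/claude-3-7-sonnet-20250219",
        messages=[{"role": "user", "content": "What is 17 * 23?"}],
        reasoning_effort="low",
        stream=True,
    )
)

# Rebuild the full response from the stream; per this commit, usage now
# reports reasoning tokens alongside ordinary completion tokens.
rebuilt = litellm.stream_chunk_builder(chunks)
print(rebuilt.usage.completion_tokens_details.reasoning_tokens)
```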
Parent: 5a18eebdb6
Commit: 6dda1ba6dd
27 changed files with 889 additions and 96 deletions
The diff shown on this page is from litellm/router.py:

```diff
@@ -68,10 +68,7 @@ from litellm.router_utils.add_retry_fallback_headers import (
     add_fallback_headers_to_response,
     add_retry_headers_to_response,
 )
-from litellm.router_utils.batch_utils import (
-    _get_router_metadata_variable_name,
-    replace_model_in_jsonl,
-)
+from litellm.router_utils.batch_utils import _get_router_metadata_variable_name
 from litellm.router_utils.client_initalization_utils import InitalizeCachedClient
 from litellm.router_utils.clientside_credential_handler import (
     get_dynamic_litellm_params,
```
```diff
@@ -105,7 +102,12 @@ from litellm.router_utils.router_callbacks.track_deployment_metrics import (
     increment_deployment_successes_for_current_minute,
 )
 from litellm.scheduler import FlowItem, Scheduler
-from litellm.types.llms.openai import AllMessageValues, Batch, FileObject, FileTypes
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    Batch,
+    FileTypes,
+    OpenAIFileObject,
+)
 from litellm.types.router import (
     CONFIGURABLE_CLIENTSIDE_AUTH_PARAMS,
     VALID_LITELLM_ENVIRONMENTS,
```
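Before the method-level hunks below, it may help to see the flow these import changes support. Here is a sketch of the (v0) unified file id upload from #9718 against a LiteLLM proxy; the base URL, API key, file, and the exact way `target_model_names` is passed are all assumptions for illustration:

```python
from openai import OpenAI

# Point the OpenAI client at a locally running LiteLLM proxy
# (placeholder address and key).
client = OpenAI(base_url="http://localhost:4000", api_key="sk-1234")

# Upload once, naming every deployment the file should be callable with;
# the proxy returns a single unified file id valid across those models.
unified_file = client.files.create(
    file=open("document.pdf", "rb"),
    purpose="user_data",
    extra_body={"target_model_names": "gpt-4o-mini, gemini-2.0-flash"},
)
print(unified_file.id)
```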
```diff
@@ -2703,7 +2705,7 @@ class Router:
         self,
         model: str,
         **kwargs,
-    ) -> FileObject:
+    ) -> OpenAIFileObject:
         try:
             kwargs["model"] = model
             kwargs["original_function"] = self._acreate_file
```
```diff
@@ -2727,7 +2729,7 @@ class Router:
         self,
         model: str,
         **kwargs,
-    ) -> FileObject:
+    ) -> OpenAIFileObject:
         try:
             verbose_router_logger.debug(
                 f"Inside _atext_completion()- model: {model}; kwargs: {kwargs}"
```
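The two hunks above retype the router's file-create path: `acreate_file` and the inner `_acreate_file` (whose debug string still says `_atext_completion()`, apparently a pre-existing copy-paste in the source) now return litellm's own `OpenAIFileObject` rather than the previously imported `FileObject`. A hedged caller sketch, with a placeholder model list and credentials assumed to come from the environment:

```python
import asyncio

from litellm import Router
from litellm.types.llms.openai import OpenAIFileObject

# Single placeholder deployment; real setups list every target model.
router = Router(
    model_list=[
        {"model_name": "gpt-4o-mini", "litellm_params": {"model": "gpt-4o-mini"}}
    ]
)

async def main() -> None:
    # The awaited result is now annotated as OpenAIFileObject.
    with open("input.jsonl", "rb") as f:
        file_obj: OpenAIFileObject = await router.acreate_file(
            model="gpt-4o-mini",
            file=f,
            purpose="batch",
        )
    print(file_obj.id, file_obj.purpose)

asyncio.run(main())
```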
```diff
@@ -2754,9 +2756,9 @@ class Router:
             stripped_model, custom_llm_provider, _, _ = get_llm_provider(
                 model=data["model"]
             )
-            kwargs["file"] = replace_model_in_jsonl(
-                file_content=kwargs["file"], new_model_name=stripped_model
-            )
+            # kwargs["file"] = replace_model_in_jsonl(
+            #     file_content=kwargs["file"], new_model_name=stripped_model
+            # )
 
             response = litellm.acreate_file(
                 **{
```
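Per the commit message, the rewrite above was removed because `acreate_file` should no longer assume the upload is batch JSONL; leaving the file contents untouched is what "enables file upload to work for chat completion requests as well". A hypothetical follow-up request, assuming OpenAI-style `file` content parts and a placeholder unified file id:

```python
import litellm

# Reference the previously uploaded file from an ordinary chat completion;
# "file-abc123" stands in for the unified id returned at upload time.
response = litellm.completion(
    model="gpt-4o-mini",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Summarize the attached document."},
                {"type": "file", "file": {"file_id": "file-abc123"}},
            ],
        }
    ],
)
print(response.choices[0].message.content)
```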
```diff
@@ -2796,6 +2798,7 @@ class Router:
             verbose_router_logger.info(
                 f"litellm.acreate_file(model={model_name})\033[32m 200 OK\033[0m"
             )
+
             return response  # type: ignore
         except Exception as e:
             verbose_router_logger.exception(
```