Litellm dev 01 06 2025 p1 (#7594)

* fix(custom_logger.py): expose new 'async_get_chat_completion_prompt' event hook

* fix(custom_logger.py, langfuse_prompt_management.py): update prompt event hooks

remove 'headers' from the custom logger 'async_get_chat_completion_prompt' and 'get_chat_completion_prompt' event hooks
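A minimal sketch of what hooking into the new event might look like. The parameter names below are assumptions based on this commit's description (note that 'headers' is no longer part of the hook); check custom_logger.py for the authoritative signature.

```python
from typing import List, Optional, Tuple

from litellm.integrations.custom_logger import CustomLogger


class MyPromptManager(CustomLogger):
    async def async_get_chat_completion_prompt(
        self,
        model: str,
        messages: List[dict],
        non_default_params: dict,
        prompt_id: str,
        prompt_variables: Optional[dict],
        **kwargs,  # tolerate any extra args the hook may receive
    ) -> Tuple[str, List[dict], dict]:
        # Rewrite the prompt before the completion call, then return the
        # (model, messages, non_default_params) triple used downstream.
        messages = [{"role": "system", "content": "Be concise."}] + messages
        return model, messages, non_default_params
```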

* feat(router.py): expose new function for prompt management based routing

* feat(router.py): partially working router prompt-factory logic

allows a load-balanced model group to be used as the model name in a Langfuse prompt management call

* feat(router.py): fix prompt management with a load-balanced model group
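A hedged usage sketch of the router-based flow described above, assuming Langfuse prompt management is wired in via the langfuse callback and that `prompt_id` / `prompt_variables` are the pass-through kwargs; the model names and prompt id are placeholders.

```python
import litellm
from litellm import Router

litellm.callbacks = ["langfuse"]  # assumed wiring for the Langfuse integration

router = Router(
    model_list=[
        {
            "model_name": "gpt-4o",  # load-balanced model group
            "litellm_params": {"model": "openai/gpt-4o"},
        },
        {
            "model_name": "gpt-4o",  # second deployment in the same group
            "litellm_params": {"model": "openai/gpt-4o-mini"},
        },
    ]
)

# The Langfuse prompt is resolved first, then the call is load balanced
# across the "gpt-4o" deployments.
response = router.completion(
    model="gpt-4o",
    messages=[{"role": "user", "content": "hi"}],
    prompt_id="my-langfuse-prompt",               # placeholder prompt id
    prompt_variables={"audience": "developers"},  # placeholder variables
)
```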

* feat(langfuse_prompt_management.py): support reading OpenAI params from Langfuse

enables users to define optional params on Langfuse instead of in client code
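An illustration of the merge semantics this enables, under the assumption that params set explicitly in client code win over those stored on the Langfuse prompt's config. The helper name is hypothetical; the real logic lives in langfuse_prompt_management.py.

```python
def merge_prompt_params(langfuse_config: dict, client_params: dict) -> dict:
    # Hypothetical helper: params from the Langfuse prompt config act as
    # defaults; anything the caller set explicitly takes precedence.
    return {**langfuse_config, **client_params}


merged = merge_prompt_params(
    {"temperature": 0.2, "max_tokens": 256},  # defined on the Langfuse prompt
    {"temperature": 0.9},                     # set in client code
)
assert merged == {"temperature": 0.9, "max_tokens": 256}
```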

* test(test_Router.py): add unit test for router-based Langfuse prompt management
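Not the actual test in test_Router.py, but a sketch of the load-balanced half of what it likely covers, using litellm's built-in `mock_response` so no network call is made; the Langfuse prompt-resolution side would additionally need the langfuse callback configured.

```python
import pytest

from litellm import Router


@pytest.mark.asyncio
async def test_router_model_group_completion():
    router = Router(
        model_list=[
            {
                "model_name": "my-model-group",
                "litellm_params": {
                    "model": "openai/gpt-4o",
                    "api_key": "fake-key",
                    "mock_response": "hello",  # litellm's built-in mocking
                },
            }
        ]
    )
    resp = await router.acompletion(
        model="my-model-group",
        messages=[{"role": "user", "content": "hi"}],
    )
    assert resp.choices[0].message.content == "hello"
```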

* fix: fix linting errors
Krish Dholakia, 2025-01-06 21:26:21 -08:00, committed by GitHub
parent 7133cf5b74
commit fef7839e8a
9 changed files with 214 additions and 90 deletions


@@ -144,6 +144,7 @@ from litellm.types.utils import (
TextCompletionResponse,
TranscriptionResponse,
Usage,
all_litellm_params,
)
with resources.open_text(
@@ -6448,3 +6449,12 @@ def _add_path_to_api_base(api_base: str, ending_path: str) -> str:
# Re-add the original query parameters
return str(modified_url.copy_with(params=original_url.params))
def get_non_default_completion_params(kwargs: dict) -> dict:
openai_params = litellm.OPENAI_CHAT_COMPLETION_PARAMS
default_params = openai_params + all_litellm_params
non_default_params = {
k: v for k, v in kwargs.items() if k not in default_params
} # model-specific params - pass them straight to the model/provider
return non_default_params
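
Usage sketch for the new helper: assuming `temperature` appears in `OPENAI_CHAT_COMPLETION_PARAMS` and `top_k` is in neither default list, only the provider-specific key survives the filter.

```python
kwargs = {"temperature": 0.7, "top_k": 40}
assert get_non_default_completion_params(kwargs) == {"top_k": 40}
```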