mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
fix(vertex_ai/gemini/transformation.py): handle 'http://' in gemini process url (#7660)
* fix(vertex_ai/gemini/transformation.py): handle 'http://' in gemini process url * refactor(router.py): refactor '_prompt_management_factory' to use logging obj get_chat_completion logic deduplicates code * fix(litellm_logging.py): update 'get_chat_completion_prompt' to update logging object messages * docs(prompt_management.md): update prompt management to be in beta given feedback - this still needs to be revised (e.g. passing in user message, not ignoring) * refactor(prompt_management_base.py): introduce base class for prompt management allows consistent behaviour across prompt management integrations * feat(prompt_management_base.py): support adding client message to template message + refactor langfuse prompt management to use prompt management base * fix(litellm_logging.py): log prompt id + prompt variables to langfuse if set allows tracking what prompt was used for what purpose * feat(litellm_logging.py): log prompt management metadata in standard logging payload + use in langfuse allows logging prompt id / prompt variables to langfuse * test: fix test * fix(router.py): cleanup unused imports * fix: fix linting error * fix: fix trace param typing * fix: fix linting errors * fix: fix code qa check
This commit is contained in:
parent
865e6d5bda
commit
c10ae8879e
15 changed files with 340 additions and 76 deletions
|
@ -500,6 +500,37 @@ def test_get_supported_openai_params() -> None:
|
|||
assert get_supported_openai_params("nonexistent") is None
|
||||
|
||||
|
||||
def test_get_chat_completion_prompt():
    """
    Verify that Logging.get_chat_completion_prompt replaces the messages
    stored on the logging object with the messages passed in.
    """
    from litellm.litellm_core_utils.litellm_logging import Logging

    # The content we expect the logging object to end up holding.
    new_content = "hello world"
    expected_messages = [{"role": "user", "content": new_content}]

    logging_obj = Logging(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        stream=False,
        call_type="acompletion",
        litellm_call_id="1234",
        start_time=datetime.now(),
        function_id="1234",
    )

    # Calling get_chat_completion_prompt should overwrite the messages
    # captured at construction time with the ones supplied here.
    logging_obj.get_chat_completion_prompt(
        model="gpt-3.5-turbo",
        messages=expected_messages,
        non_default_params={},
        prompt_id="1234",
        prompt_variables=None,
    )

    assert logging_obj.messages == expected_messages
|
||||
|
||||
|
||||
def test_redact_msgs_from_logs():
|
||||
"""
|
||||
Tests that turn_off_message_logging does not modify the response_obj
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue