Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 03:34:10 +00:00.
Litellm dev 2024 12 19 p3 (#7322)

* fix(utils.py): remove unsupported optional params (if drop_params=True) before passing into map openai params. Fixes https://github.com/BerriAI/litellm/issues/7242
* test: new test for langfuse prompt management hook. Addresses https://github.com/BerriAI/litellm/issues/3893#issuecomment-2549080296
* feat(main.py): add 'get_chat_completion_prompt' customlogger hook; allows for langfuse prompt management. Addresses https://github.com/BerriAI/litellm/issues/3893#issuecomment-2549080296
* feat(langfuse_prompt_management.py): working e2e langfuse prompt management; works with `langfuse/` route
* feat(main.py): initial tracing for dynamic langfuse params; allows admin to specify langfuse keys by model in model_list
* feat(main.py): support passing langfuse credentials dynamically
* fix(langfuse_prompt_management.py): create langfuse client based on dynamic callback params; allows dynamic langfuse params to work
* fix: fix linting errors
* docs(prompt_management.md): refactor docs for sdk + proxy prompt management tutorial
* docs(prompt_management.md): cleanup doc
* docs: cleanup topnav
* docs(prompt_management.md): update docs to be easier to use
* fix: remove unused imports
* docs(prompt_management.md): add architectural overview doc
* fix(litellm_logging.py): fix dynamic param passing
* fix(langfuse_prompt_management.py): fix linting errors
* fix: fix linting errors
* fix: use typing_extensions for typealias to ensure python3.8 compatibility
* test: use stream_options in test to account for tiktoken diff
* fix: improve import error message, and check run test earlier
This commit is contained in:
parent
205e2dbe3c
commit
b026230b0a
17 changed files with 648 additions and 260 deletions
|
@ -4513,3 +4513,23 @@ def test_openai_hallucinated_tool_call_util(function_name, expect_modification):
|
|||
else:
|
||||
assert len(response) == 1
|
||||
assert response[0].function.name == function_name
|
||||
|
||||
|
||||
def test_langfuse_completion(monkeypatch):
    """End-to-end smoke test for the `langfuse/` passthrough route.

    Langfuse credentials are injected into the environment via the
    ``monkeypatch`` fixture and also forwarded explicitly as dynamic
    params on the completion call (both paths are exercised here).
    ``prompt_id`` selects a Langfuse-managed prompt; judging by the
    message contents, ``prompt_variables`` feeds the managed template
    while the inline ``messages`` entry is expected to be ignored.

    NOTE(review): this test performs real network I/O against Langfuse
    and the upstream model provider — it asserts nothing and only checks
    that the call completes without raising.
    """
    credentials = {
        "LANGFUSE_PUBLIC_KEY": "pk-lf-b3db7e8e-c2f6-4fc7-825c-a541a8fbe003",
        "LANGFUSE_SECRET_KEY": "sk-lf-b11ef3a8-361c-4445-9652-12318b8596e4",
        "LANGFUSE_HOST": "https://us.cloud.langfuse.com",
    }
    for name, value in credentials.items():
        monkeypatch.setenv(name, value)

    litellm.set_verbose = True

    litellm.completion(
        model="langfuse/gpt-3.5-turbo",
        langfuse_public_key=os.getenv("LANGFUSE_PUBLIC_KEY"),
        langfuse_secret_key=os.getenv("LANGFUSE_SECRET_KEY"),
        langfuse_host="https://us.cloud.langfuse.com",
        prompt_id="test-chat-prompt",
        prompt_variables={"user_message": "this is used"},
        messages=[{"role": "user", "content": "this is ignored"}],
    )
|
Loading…
Add table
Add a link
Reference in a new issue