LiteLLM Minor Fixes & Improvements (10/16/2024) (#6265)
* fix(caching_handler.py): handle positional arguments in add cache logic. Fixes https://github.com/BerriAI/litellm/issues/6264
* feat(litellm_pre_call_utils.py): allow forwarding openai org id to backend client. https://github.com/BerriAI/litellm/issues/6237 (a client-side usage sketch follows the diffstat below)
* docs(configs.md): add 'forward_openai_org_id' to docs
* fix(proxy_server.py): return model info if user_model is set. Fixes https://github.com/BerriAI/litellm/issues/6233
* fix(hosted_vllm/chat/transformation.py): don't set tools unless non-none
* fix(openai.py): improve debug log for openai 'str' error. Addresses https://github.com/BerriAI/litellm/issues/6272
* fix(proxy_server.py): fix linting error
* fix(proxy_server.py): fix linting errors
* test: skip WIP test
* docs(openai.md): add docs on passing openai org id from client to openai
parent 43878bd2a0
commit 38a9a106d2
14 changed files with 371 additions and 47 deletions
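
For the org-id forwarding change above: once `forward_openai_org_id` is enabled in the proxy config (the setting this commit documents in configs.md), a client can pass its OpenAI organization id through the LiteLLM proxy to the OpenAI backend. Below is a minimal client-side sketch, assuming a proxy at http://localhost:4000 and placeholder key/org values (none of which come from this commit); the openai SDK sends `organization` as the OpenAI-Organization header, which is what the new helper reads:

from openai import OpenAI

# Assumed values for illustration: a local LiteLLM proxy and a placeholder
# virtual key; "org-abc123" stands in for a real OpenAI organization id.
client = OpenAI(
    base_url="http://localhost:4000",
    api_key="sk-1234",
    organization="org-abc123",  # sent as the "OpenAI-Organization" header
)

response = client.chat.completions.create(
    model="gpt-4o",  # any model the proxy routes to an OpenAI backend
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
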
@@ -368,3 +368,41 @@ def test_is_request_body_safe_model_enabled(
        error_raised = True

    assert expect_error == error_raised


def test_reading_openai_org_id_from_headers():
    from litellm.proxy.litellm_pre_call_utils import get_openai_org_id_from_headers

    headers = {
        "OpenAI-Organization": "test_org_id",
    }
    org_id = get_openai_org_id_from_headers(headers)
    assert org_id == "test_org_id"


@pytest.mark.parametrize(
    "headers, expected_data",
    [
        ({"OpenAI-Organization": "test_org_id"}, {"organization": "test_org_id"}),
        ({"openai-organization": "test_org_id"}, {"organization": "test_org_id"}),
        ({}, {}),
        (
            {
                "OpenAI-Organization": "test_org_id",
                "Authorization": "Bearer test_token",
            },
            {
                "organization": "test_org_id",
            },
        ),
    ],
)
def test_add_litellm_data_for_backend_llm_call(headers, expected_data):
    import json
    from litellm.proxy.litellm_pre_call_utils import (
        add_litellm_data_for_backend_llm_call,
    )

    data = add_litellm_data_for_backend_llm_call(headers)

    assert json.dumps(data, sort_keys=True) == json.dumps(expected_data, sort_keys=True)
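
The tests above pin down the expected behavior of the two new helpers: the org-id lookup is case-insensitive, and unrelated headers such as Authorization are not forwarded. The following is a minimal sketch consistent with those tests; the real implementations live in litellm/proxy/litellm_pre_call_utils.py and also consult proxy settings, so treat this as illustrative rather than the actual code:

from typing import Optional


def get_openai_org_id_from_headers(headers: dict) -> Optional[str]:
    # Header names are case-insensitive, so accept "OpenAI-Organization"
    # in any casing (the parametrized test covers "openai-organization").
    for name, value in headers.items():
        if name.lower() == "openai-organization":
            return value
    return None


def add_litellm_data_for_backend_llm_call(headers: dict) -> dict:
    # Build the extra kwargs for the backend call. Only "organization" is
    # set, and only when the client actually sent the header; everything
    # else (e.g. Authorization) is deliberately left out.
    data: dict = {}
    org_id = get_openai_org_id_from_headers(headers)
    if org_id is not None:
        data["organization"] = org_id
    return data
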