LiteLLM Minor Fixes & Improvements (11/23/2024) (#6870)
* feat(pass_through_endpoints/): support logging anthropic/gemini pass through calls to langfuse/s3/etc.
* fix(utils.py): allow disabling end user cost tracking with new param
  Allows proxy admin to disable cost tracking for end user - keeps prometheus metrics small
* docs(configs.md): add disable_end_user_cost_tracking reference to docs
* feat(key_management_endpoints.py): add support for restricting access to `/key/generate` by team/proxy level role
  Enables admin to restrict key creation, and assign team admins to handle distributing keys
* test(test_key_management.py): add unit testing for personal / team key restriction checks
* docs: add docs on restricting key creation
* docs(finetuned_models.md): add new guide on calling finetuned models
* docs(input.md): clean up anthropic supported params
  Closes https://github.com/BerriAI/litellm/issues/6856
* test(test_embedding.py): add test for passing extra headers via embedding
* feat(cohere/embed): pass client to async embedding
* feat(rerank.py): add `/v1/rerank` if missing for cohere base url
  Closes https://github.com/BerriAI/litellm/issues/6844
* fix(main.py): pass extra_headers param to openai
  Fixes https://github.com/BerriAI/litellm/issues/6836
* fix(litellm_logging.py): don't disable global callbacks when dynamic callbacks are set
  Fixes issue where global callbacks - e.g. prometheus - were overridden when langfuse was set dynamically
* fix(handler.py): fix linting error
* fix: fix typing
* build: add conftest to proxy_admin_ui_tests/
* test: fix test
* fix: fix linting errors
* test: fix test
* fix: fix pass through testing
Parent: d81ae45827
Commit: 7e9d8b58f6
35 changed files with 871 additions and 248 deletions
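Most of the items above are exercised by the test diffs excerpted below. One of them, the new end-user cost-tracking opt-out, is just a module-level flag. A minimal sketch based only on what the tests in this commit set (the `litellm.disable_end_user_cost_tracking` attribute); the proxy-config spelling documented in configs.md is not reproduced here:

import litellm

# Proxy admins can switch off per-end-user cost tracking to keep Prometheus
# label cardinality small (see fix(utils.py) in the commit message).
# The attribute name is taken from test_get_end_user_id_for_cost_tracking below.
litellm.disable_end_user_cost_tracking = True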
@@ -1080,3 +1080,34 @@ def test_cohere_img_embeddings(input, input_type):
         assert response.usage.prompt_tokens_details.image_tokens > 0
     else:
         assert response.usage.prompt_tokens_details.text_tokens > 0
+
+
+@pytest.mark.parametrize("sync_mode", [True, False])
+@pytest.mark.asyncio
+async def test_embedding_with_extra_headers(sync_mode):
+
+    input = ["hello world"]
+    from litellm.llms.custom_httpx.http_handler import HTTPHandler, AsyncHTTPHandler
+
+    if sync_mode:
+        client = HTTPHandler()
+    else:
+        client = AsyncHTTPHandler()
+
+    data = {
+        "model": "cohere/embed-english-v3.0",
+        "input": input,
+        "extra_headers": {"my-test-param": "hello-world"},
+        "client": client,
+    }
+    with patch.object(client, "post") as mock_post:
+        try:
+            if sync_mode:
+                embedding(**data)
+            else:
+                await litellm.aembedding(**data)
+        except Exception as e:
+            print(e)
+
+        mock_post.assert_called_once()
+        assert "my-test-param" in mock_post.call_args.kwargs["headers"]
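For context, the caller-side usage this test pins down, forwarding extra HTTP headers through an embedding call, looks roughly like the sketch below. The model name and header are copied from the test; that the header lands in the outbound request's `headers` is exactly what the mock assertion above checks, not a separately documented contract.

import litellm

# Sketch of the behavior exercised by test_embedding_with_extra_headers:
# extra_headers should be forwarded to the provider HTTP request.
response = litellm.embedding(
    model="cohere/embed-english-v3.0",
    input=["hello world"],
    extra_headers={"my-test-param": "hello-world"},  # expected in the outbound request headers
)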
@@ -215,7 +215,10 @@ async def test_rerank_custom_api_base():
         args_to_api = kwargs["json"]
         print("Arguments passed to API=", args_to_api)
         print("url = ", _url)
-        assert _url[0] == "https://exampleopenaiendpoint-production.up.railway.app/"
+        assert (
+            _url[0]
+            == "https://exampleopenaiendpoint-production.up.railway.app/v1/rerank"
+        )
         assert args_to_api == expected_payload
         assert response.id is not None
         assert response.results is not None
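This hunk reflects the `feat(rerank.py)` item from the commit message: a custom Cohere base URL supplied without the `/v1/rerank` path now has it appended. A hedged usage sketch, assuming `api_base` is accepted as a per-call parameter as the test name `test_rerank_custom_api_base` suggests; the model name here is illustrative.

import litellm

# With a bare base URL, the request should be sent to <api_base>/v1/rerank,
# matching the updated assertion above.
response = litellm.rerank(
    model="cohere/rerank-english-v3.0",  # illustrative model name
    query="hello",
    documents=["hello", "world"],
    api_base="https://exampleopenaiendpoint-production.up.railway.app",
)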
@@ -258,3 +261,32 @@ async def test_rerank_custom_callbacks():
     assert custom_logger.kwargs.get("response_cost") > 0.0
     assert custom_logger.response_obj is not None
     assert custom_logger.response_obj.results is not None
+
+
+def test_complete_base_url_cohere():
+    from litellm.llms.custom_httpx.http_handler import HTTPHandler
+
+    client = HTTPHandler()
+    litellm.api_base = "http://localhost:4000"
+    litellm.set_verbose = True
+
+    text = "Hello there!"
+    list_texts = ["Hello there!", "How are you?", "How do you do?"]
+
+    rerank_model = "rerank-multilingual-v3.0"
+
+    with patch.object(client, "post") as mock_post:
+        try:
+            litellm.rerank(
+                model=rerank_model,
+                query=text,
+                documents=list_texts,
+                custom_llm_provider="cohere",
+                client=client,
+            )
+        except Exception as e:
+            print(e)
+
+        print("mock_post.call_args", mock_post.call_args)
+        mock_post.assert_called_once()
+        assert "http://localhost:4000/v1/rerank" in mock_post.call_args.kwargs["url"]
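The new `test_complete_base_url_cohere` pins the same suffix behavior for a globally configured `litellm.api_base`. The real logic lives in `rerank.py` and is not part of this excerpt; a hypothetical helper consistent with both assertions could look like the following (the name `ensure_rerank_endpoint` is illustrative, not LiteLLM's API).

def ensure_rerank_endpoint(api_base: str) -> str:
    # Hypothetical sketch: append /v1/rerank to a Cohere base URL if it is missing.
    # Only the resulting URLs (".../v1/rerank", "http://localhost:4000/v1/rerank")
    # are pinned by the tests in this diff.
    base = api_base.rstrip("/")
    if not base.endswith("/v1/rerank"):
        base = f"{base}/v1/rerank"
    return base


assert ensure_rerank_endpoint("http://localhost:4000") == "http://localhost:4000/v1/rerank"
assert ensure_rerank_endpoint("http://localhost:4000/v1/rerank") == "http://localhost:4000/v1/rerank"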
@@ -1012,3 +1012,23 @@ def test_models_by_provider():
 
     for provider in providers:
         assert provider in models_by_provider.keys()
+
+
+@pytest.mark.parametrize(
+    "litellm_params, disable_end_user_cost_tracking, expected_end_user_id",
+    [
+        ({}, False, None),
+        ({"proxy_server_request": {"body": {"user": "123"}}}, False, "123"),
+        ({"proxy_server_request": {"body": {"user": "123"}}}, True, None),
+    ],
+)
+def test_get_end_user_id_for_cost_tracking(
+    litellm_params, disable_end_user_cost_tracking, expected_end_user_id
+):
+    from litellm.utils import get_end_user_id_for_cost_tracking
+
+    litellm.disable_end_user_cost_tracking = disable_end_user_cost_tracking
+    assert (
+        get_end_user_id_for_cost_tracking(litellm_params=litellm_params)
+        == expected_end_user_id
+    )
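The three parametrized cases above fully describe the intended behavior of `get_end_user_id_for_cost_tracking`: return the `user` field from the proxied request body, unless end-user cost tracking is disabled. A sketch consistent with those cases, not the shipped implementation in `litellm/utils.py`, which may differ in details:

from typing import Optional

import litellm


def get_end_user_id_for_cost_tracking_sketch(litellm_params: dict) -> Optional[str]:
    # Illustrative re-implementation matching the test's three cases only.
    if litellm.disable_end_user_cost_tracking:
        return None  # proxy admin opted out of per-end-user tracking
    proxy_server_request = litellm_params.get("proxy_server_request") or {}
    return proxy_server_request.get("body", {}).get("user")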