forked from phoenix/litellm-mirror
LiteLLM Minor Fixes & Improvements (11/23/2024) (#6870)
* feat(pass_through_endpoints/): support logging anthropic/gemini pass-through calls to langfuse/s3/etc.
* fix(utils.py): allow disabling end-user cost tracking with a new param. Lets the proxy admin disable cost tracking for end users, keeping prometheus metrics small
* docs(configs.md): add disable_end_user_cost_tracking reference to docs
* feat(key_management_endpoints.py): add support for restricting access to `/key/generate` by team/proxy-level role. Enables the admin to restrict key creation and assign team admins to handle distributing keys
* test(test_key_management.py): add unit tests for personal / team key restriction checks
* docs: add docs on restricting key creation
* docs(finetuned_models.md): add new guide on calling fine-tuned models
* docs(input.md): clean up anthropic supported params. Closes https://github.com/BerriAI/litellm/issues/6856
* test(test_embedding.py): add test for passing extra headers via embedding
* feat(cohere/embed): pass client to async embedding
* feat(rerank.py): add `/v1/rerank` to the cohere base url if missing. Closes https://github.com/BerriAI/litellm/issues/6844
* fix(main.py): pass the extra_headers param to openai. Fixes https://github.com/BerriAI/litellm/issues/6836
* fix(litellm_logging.py): don't disable global callbacks when dynamic callbacks are set. Fixes an issue where global callbacks (e.g. prometheus) were overridden when langfuse was set dynamically
* fix(handler.py): fix linting error
* fix: fix typing
* build: add conftest to proxy_admin_ui_tests/
* test: fix test
* fix: fix linting errors
* test: fix test
* fix: fix pass-through testing
parent d81ae45827
commit 7e9d8b58f6
35 changed files with 871 additions and 248 deletions
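
For orientation, here is a minimal sketch of how two of the fixes above surface to callers: the new module-level flag for disabling end-user cost tracking, and `extra_headers` now being forwarded to openai. Model name and header values are illustrative placeholders, not taken from the diff.

import litellm

# New flag from this PR; test_get_end_user_id_for_cost_tracking below
# toggles it the same way.
litellm.disable_end_user_cost_tracking = True

# fix(main.py): extra_headers is now forwarded to openai.
# Header name/value here are placeholders.
response = litellm.completion(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "hi"}],
    extra_headers={"x-my-trace-id": "abc-123"},
)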

@@ -1080,3 +1080,34 @@ def test_cohere_img_embeddings(input, input_type):
         assert response.usage.prompt_tokens_details.image_tokens > 0
     else:
         assert response.usage.prompt_tokens_details.text_tokens > 0
+
+
+@pytest.mark.parametrize("sync_mode", [True, False])
+@pytest.mark.asyncio
+async def test_embedding_with_extra_headers(sync_mode):
+
+    input = ["hello world"]
+    from litellm.llms.custom_httpx.http_handler import HTTPHandler, AsyncHTTPHandler
+
+    if sync_mode:
+        client = HTTPHandler()
+    else:
+        client = AsyncHTTPHandler()
+
+    data = {
+        "model": "cohere/embed-english-v3.0",
+        "input": input,
+        "extra_headers": {"my-test-param": "hello-world"},
+        "client": client,
+    }
+    with patch.object(client, "post") as mock_post:
+        try:
+            if sync_mode:
+                embedding(**data)
+            else:
+                await litellm.aembedding(**data)
+        except Exception as e:
+            print(e)
+
+        mock_post.assert_called_once()
+        assert "my-test-param" in mock_post.call_args.kwargs["headers"]
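The test above mocks the HTTP transport to inspect the outgoing headers. For reference, a hedged sketch of the direct call shape without the mock, using the same model and placeholder header as the test (requires COHERE_API_KEY in the environment):

import litellm

# Same call shape as test_embedding_with_extra_headers, unmocked.
response = litellm.embedding(
    model="cohere/embed-english-v3.0",
    input=["hello world"],
    extra_headers={"my-test-param": "hello-world"},
)
print(response.usage)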
@@ -215,7 +215,10 @@ async def test_rerank_custom_api_base():
     args_to_api = kwargs["json"]
     print("Arguments passed to API=", args_to_api)
     print("url = ", _url)
-    assert _url[0] == "https://exampleopenaiendpoint-production.up.railway.app/"
+    assert (
+        _url[0]
+        == "https://exampleopenaiendpoint-production.up.railway.app/v1/rerank"
+    )
     assert args_to_api == expected_payload
     assert response.id is not None
     assert response.results is not None
@@ -258,3 +261,32 @@ async def test_rerank_custom_callbacks():
     assert custom_logger.kwargs.get("response_cost") > 0.0
     assert custom_logger.response_obj is not None
     assert custom_logger.response_obj.results is not None
+
+
+def test_complete_base_url_cohere():
+    from litellm.llms.custom_httpx.http_handler import HTTPHandler
+
+    client = HTTPHandler()
+    litellm.api_base = "http://localhost:4000"
+    litellm.set_verbose = True
+
+    text = "Hello there!"
+    list_texts = ["Hello there!", "How are you?", "How do you do?"]
+
+    rerank_model = "rerank-multilingual-v3.0"
+
+    with patch.object(client, "post") as mock_post:
+        try:
+            litellm.rerank(
+                model=rerank_model,
+                query=text,
+                documents=list_texts,
+                custom_llm_provider="cohere",
+                client=client,
+            )
+        except Exception as e:
+            print(e)
+
+        print("mock_post.call_args", mock_post.call_args)
+        mock_post.assert_called_once()
+        assert "http://localhost:4000/v1/rerank" in mock_post.call_args.kwargs["url"]
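Both rerank tests pin the same expectation: a bare cohere api_base gains the `/v1/rerank` route. A minimal sketch of the url normalization this implies; the helper name is hypothetical, not litellm's actual function:

def ensure_cohere_rerank_route(api_base: str) -> str:
    """Hypothetical helper: append /v1/rerank to a base url if missing.

    Mirrors the behavior the tests above pin down, e.g.
    http://localhost:4000 -> http://localhost:4000/v1/rerank
    """
    base = api_base.rstrip("/")
    if not base.endswith("/v1/rerank"):
        base = f"{base}/v1/rerank"
    return base


assert ensure_cohere_rerank_route("http://localhost:4000") == "http://localhost:4000/v1/rerank"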
@@ -1012,3 +1012,23 @@ def test_models_by_provider():
 
     for provider in providers:
         assert provider in models_by_provider.keys()
+
+
+@pytest.mark.parametrize(
+    "litellm_params, disable_end_user_cost_tracking, expected_end_user_id",
+    [
+        ({}, False, None),
+        ({"proxy_server_request": {"body": {"user": "123"}}}, False, "123"),
+        ({"proxy_server_request": {"body": {"user": "123"}}}, True, None),
+    ],
+)
+def test_get_end_user_id_for_cost_tracking(
+    litellm_params, disable_end_user_cost_tracking, expected_end_user_id
+):
+    from litellm.utils import get_end_user_id_for_cost_tracking
+
+    litellm.disable_end_user_cost_tracking = disable_end_user_cost_tracking
+    assert (
+        get_end_user_id_for_cost_tracking(litellm_params=litellm_params)
+        == expected_end_user_id
+    )
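The three parametrized cases read off the contract directly: return None when tracking is disabled, else pull the end user from the proxied request body. A hedged sketch of equivalent logic (not litellm's actual implementation, which may handle more cases):

from typing import Optional


def get_end_user_id_sketch(litellm_params: dict, disabled: bool) -> Optional[str]:
    # Equivalent of the three test cases above.
    if disabled:
        return None
    return litellm_params.get("proxy_server_request", {}).get("body", {}).get("user")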
@@ -216,3 +216,78 @@ async def test_init_custom_logger_compatible_class_as_callback():
     await use_callback_in_llm_call(callback, used_in="success_callback")
 
     reset_env_vars()
+
+
+def test_dynamic_logging_global_callback():
+    from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
+    from litellm.integrations.custom_logger import CustomLogger
+    from litellm.types.utils import ModelResponse, Choices, Message, Usage
+
+    cl = CustomLogger()
+
+    litellm_logging = LiteLLMLoggingObj(
+        model="claude-3-opus-20240229",
+        messages=[{"role": "user", "content": "hi"}],
+        stream=False,
+        call_type="completion",
+        start_time=datetime.now(),
+        litellm_call_id="123",
+        function_id="456",
+        kwargs={
+            "langfuse_public_key": "my-mock-public-key",
+            "langfuse_secret_key": "my-mock-secret-key",
+        },
+        dynamic_success_callbacks=["langfuse"],
+    )
+
+    with patch.object(cl, "log_success_event") as mock_log_success_event:
+        cl.log_success_event = mock_log_success_event
+        litellm.success_callback = [cl]
+
+        try:
+            litellm_logging.success_handler(
+                result=ModelResponse(
+                    id="chatcmpl-5418737b-ab14-420b-b9c5-b278b6681b70",
+                    created=1732306261,
+                    model="claude-3-opus-20240229",
+                    object="chat.completion",
+                    system_fingerprint=None,
+                    choices=[
+                        Choices(
+                            finish_reason="stop",
+                            index=0,
+                            message=Message(
+                                content="hello",
+                                role="assistant",
+                                tool_calls=None,
+                                function_call=None,
+                            ),
+                        )
+                    ],
+                    usage=Usage(
+                        completion_tokens=20,
+                        prompt_tokens=10,
+                        total_tokens=30,
+                        completion_tokens_details=None,
+                        prompt_tokens_details=None,
+                    ),
+                ),
+                start_time=datetime.now(),
+                end_time=datetime.now(),
+                cache_hit=False,
+            )
+        except Exception as e:
+            print(f"Error: {e}")
+
+        mock_log_success_event.assert_called_once()
+
+
+def test_get_combined_callback_list():
+    from litellm.litellm_core_utils.litellm_logging import get_combined_callback_list
+
+    assert "langfuse" in get_combined_callback_list(
+        dynamic_success_callbacks=["langfuse"], global_callbacks=["lago"]
+    )
+    assert "lago" in get_combined_callback_list(
+        dynamic_success_callbacks=["langfuse"], global_callbacks=["lago"]
+    )
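test_get_combined_callback_list captures the core idea of the callback fix: dynamic (per-request) callbacks are merged with the global list instead of replacing it. A hedged sketch of what such a merge could look like; litellm's actual implementation may order or dedupe differently:

def get_combined_callback_list_sketch(dynamic_success_callbacks, global_callbacks):
    # Merge per-request callbacks into the global list, preserving order
    # and dropping duplicates; satisfies the assertions in the test above.
    combined = list(global_callbacks)
    for cb in dynamic_success_callbacks or []:
        if cb not in combined:
            combined.append(cb)
    return combined


assert set(get_combined_callback_list_sketch(["langfuse"], ["lago"])) == {"langfuse", "lago"}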
@@ -73,7 +73,7 @@ async def test_anthropic_passthrough_handler(
     start_time = datetime.now()
     end_time = datetime.now()
 
-    await AnthropicPassthroughLoggingHandler.anthropic_passthrough_handler(
+    result = AnthropicPassthroughLoggingHandler.anthropic_passthrough_handler(
         httpx_response=mock_httpx_response,
         response_body=mock_response,
         logging_obj=mock_logging_obj,
@@ -84,30 +84,7 @@ async def test_anthropic_passthrough_handler(
         cache_hit=False,
     )
 
-    # Assert that async_success_handler was called
-    assert mock_logging_obj.async_success_handler.called
-
-    call_args = mock_logging_obj.async_success_handler.call_args
-    call_kwargs = call_args.kwargs
-    print("call_kwargs", call_kwargs)
-
-    # Assert required fields are present in call_kwargs
-    assert "result" in call_kwargs
-    assert "start_time" in call_kwargs
-    assert "end_time" in call_kwargs
-    assert "cache_hit" in call_kwargs
-    assert "response_cost" in call_kwargs
-    assert "model" in call_kwargs
-    assert "standard_logging_object" in call_kwargs
-
-    # Assert specific values and types
-    assert isinstance(call_kwargs["result"], litellm.ModelResponse)
-    assert isinstance(call_kwargs["start_time"], datetime)
-    assert isinstance(call_kwargs["end_time"], datetime)
-    assert isinstance(call_kwargs["cache_hit"], bool)
-    assert isinstance(call_kwargs["response_cost"], float)
-    assert call_kwargs["model"] == "claude-3-opus-20240229"
-    assert isinstance(call_kwargs["standard_logging_object"], dict)
+    assert isinstance(result["result"], litellm.ModelResponse)
 
 
 def test_create_anthropic_response_logging_payload(mock_logging_obj):
@@ -64,6 +64,7 @@ async def test_chunk_processor_yields_raw_bytes(endpoint_type, url_route):
     litellm_logging_obj = MagicMock()
     start_time = datetime.now()
+    passthrough_success_handler_obj = MagicMock()
     litellm_logging_obj.async_success_handler = AsyncMock()
 
     # Capture yielded chunks and perform detailed assertions
     received_chunks = []
tests/proxy_admin_ui_tests/conftest.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+# conftest.py
+
+import importlib
+import os
+import sys
+
+import pytest
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+import litellm
+
+
+@pytest.fixture(scope="function", autouse=True)
+def setup_and_teardown():
+    """
+    This fixture reloads litellm before every test function to speed up testing by removing chained callbacks.
+    """
+    curr_dir = os.getcwd()  # Get the current working directory
+    sys.path.insert(
+        0, os.path.abspath("../..")
+    )  # Adds the project directory to the system path
+
+    import litellm
+    from litellm import Router
+
+    importlib.reload(litellm)
+    import asyncio
+
+    loop = asyncio.get_event_loop_policy().new_event_loop()
+    asyncio.set_event_loop(loop)
+    print(litellm)
+    # from litellm import Router, completion, aembedding, acompletion, embedding
+    yield
+
+    # Teardown code (executes after the yield point)
+    loop.close()  # Close the loop created earlier
+    asyncio.set_event_loop(None)  # Remove the reference to the loop
+
+
+def pytest_collection_modifyitems(config, items):
+    # Separate tests in 'test_amazing_proxy_custom_logger.py' and other tests
+    custom_logger_tests = [
+        item for item in items if "custom_logger" in item.parent.name
+    ]
+    other_tests = [item for item in items if "custom_logger" not in item.parent.name]
+
+    # Sort tests based on their names
+    custom_logger_tests.sort(key=lambda x: x.name)
+    other_tests.sort(key=lambda x: x.name)
+
+    # Reorder the items list
+    items[:] = custom_logger_tests + other_tests
@@ -542,3 +542,65 @@ async def test_list_teams(prisma_client):
 
     # Clean up
     await prisma_client.delete_data(team_id_list=[team_id], table_name="team")
+
+
+def test_is_team_key():
+    from litellm.proxy.management_endpoints.key_management_endpoints import _is_team_key
+
+    assert _is_team_key(GenerateKeyRequest(team_id="test_team_id"))
+    assert not _is_team_key(GenerateKeyRequest(user_id="test_user_id"))
+
+
+def test_team_key_generation_check():
+    from litellm.proxy.management_endpoints.key_management_endpoints import (
+        _team_key_generation_check,
+    )
+    from fastapi import HTTPException
+
+    litellm.key_generation_settings = {
+        "team_key_generation": {"allowed_team_member_roles": ["admin"]}
+    }
+
+    assert _team_key_generation_check(
+        UserAPIKeyAuth(
+            user_role=LitellmUserRoles.INTERNAL_USER,
+            api_key="sk-1234",
+            team_member=Member(role="admin", user_id="test_user_id"),
+        )
+    )
+
+    with pytest.raises(HTTPException):
+        _team_key_generation_check(
+            UserAPIKeyAuth(
+                user_role=LitellmUserRoles.INTERNAL_USER,
+                api_key="sk-1234",
+                user_id="test_user_id",
+                team_member=Member(role="user", user_id="test_user_id"),
+            )
+        )
+
+
+def test_personal_key_generation_check():
+    from litellm.proxy.management_endpoints.key_management_endpoints import (
+        _personal_key_generation_check,
+    )
+    from fastapi import HTTPException
+
+    litellm.key_generation_settings = {
+        "personal_key_generation": {"allowed_user_roles": ["proxy_admin"]}
+    }
+
+    assert _personal_key_generation_check(
+        UserAPIKeyAuth(
+            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
+        )
+    )
+
+    with pytest.raises(HTTPException):
+        _personal_key_generation_check(
+            UserAPIKeyAuth(
+                user_role=LitellmUserRoles.INTERNAL_USER,
+                api_key="sk-1234",
+                user_id="admin",
+            )
+        )
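The two `key_generation_settings` blocks above show the config surface of the new `/key/generate` restrictions. A hedged sketch of setting both policies at once, as a proxy admin might; the dict shape and role strings are exactly those the tests exercise:

import litellm

# Only team admins may create team keys; only the proxy admin may
# create personal keys.
litellm.key_generation_settings = {
    "team_key_generation": {"allowed_team_member_roles": ["admin"]},
    "personal_key_generation": {"allowed_user_roles": ["proxy_admin"]},
}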
@@ -160,7 +160,7 @@ async def test_create_new_user_in_organization(prisma_client, user_role):
     response = await organization_member_add(
         data=OrganizationMemberAddRequest(
             organization_id=org_id,
-            member=Member(role=user_role, user_id=created_user_id),
+            member=OrgMember(role=user_role, user_id=created_user_id),
         ),
         http_request=None,
     )
@@ -220,7 +220,7 @@ async def test_org_admin_create_team_permissions(prisma_client):
     response = await organization_member_add(
         data=OrganizationMemberAddRequest(
             organization_id=org_id,
-            member=Member(role=LitellmUserRoles.ORG_ADMIN, user_id=created_user_id),
+            member=OrgMember(role=LitellmUserRoles.ORG_ADMIN, user_id=created_user_id),
         ),
         http_request=None,
     )
@@ -292,7 +292,7 @@ async def test_org_admin_create_user_permissions(prisma_client):
     response = await organization_member_add(
         data=OrganizationMemberAddRequest(
             organization_id=org_id,
-            member=Member(role=LitellmUserRoles.ORG_ADMIN, user_id=created_user_id),
+            member=OrgMember(role=LitellmUserRoles.ORG_ADMIN, user_id=created_user_id),
         ),
         http_request=None,
     )
@@ -323,7 +323,7 @@ async def test_org_admin_create_user_permissions(prisma_client):
     response = await organization_member_add(
         data=OrganizationMemberAddRequest(
             organization_id=org_id,
-            member=Member(
+            member=OrgMember(
                 role=LitellmUserRoles.INTERNAL_USER, user_id=new_internal_user_for_org
             ),
         ),
@@ -375,7 +375,7 @@ async def test_org_admin_create_user_team_wrong_org_permissions(prisma_client):
     response = await organization_member_add(
         data=OrganizationMemberAddRequest(
             organization_id=org1_id,
-            member=Member(role=LitellmUserRoles.ORG_ADMIN, user_id=created_user_id),
+            member=OrgMember(role=LitellmUserRoles.ORG_ADMIN, user_id=created_user_id),
        ),
         http_request=None,
     )
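Every `organization_member_add` call site above swaps `Member` for `OrgMember`. A hedged sketch of the resulting request shape; the import path is an assumption for illustration, since the diff only shows the type swap at the call sites:

# Import path assumed; ids are placeholders.
from litellm.proxy._types import OrganizationMemberAddRequest, OrgMember

request = OrganizationMemberAddRequest(
    organization_id="org-1234",
    member=OrgMember(role="internal_user", user_id="user-5678"),
)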