(UI) Fix viewing members, keys in a team + added testing (#6514)
* fix listing teams on ui

* LiteLLM Minor Fixes & Improvements (10/28/2024) (#6475)

* fix(anthropic/chat/transformation.py): support anthropic disable_parallel_tool_use param
  Fixes https://github.com/BerriAI/litellm/issues/6456

* feat(anthropic/chat/transformation.py): support anthropic computer tool use
  Closes https://github.com/BerriAI/litellm/issues/6427

* fix(vertex_ai/common_utils.py): parse out '$schema' when calling vertex ai
  Fixes issue when trying to call vertex from vercel sdk

* fix(main.py): add 'extra_headers' support for azure on all translation endpoints
  Fixes https://github.com/BerriAI/litellm/issues/6465

* fix: fix linting errors

* fix(transformation.py): handle no beta headers for anthropic

* test: cleanup test

* fix: fix linting error

* fix: fix linting errors

* fix: fix linting errors

* fix(transformation.py): handle dummy tool call

* fix(main.py): fix linting error

* fix(azure.py): pass required param

* LiteLLM Minor Fixes & Improvements (10/24/2024) (#6441)

* fix(azure.py): handle /openai/deployment in azure api base

* fix(factory.py): fix faulty anthropic tool result translation check
  Fixes https://github.com/BerriAI/litellm/issues/6422

* fix(gpt_transformation.py): add support for parallel_tool_calls to azure
  Fixes https://github.com/BerriAI/litellm/issues/6440

* fix(factory.py): support anthropic prompt caching for tool results

* fix(vertex_ai/common_utils): don't pop non-null required field
  Fixes https://github.com/BerriAI/litellm/issues/6426

* feat(vertex_ai.py): support code_execution tool call for vertex ai + gemini
  Closes https://github.com/BerriAI/litellm/issues/6434

* build(model_prices_and_context_window.json): Add 'supports_assistant_prefill' for bedrock claude-3-5-sonnet v2 models
  Closes https://github.com/BerriAI/litellm/issues/6437

* fix(types/utils.py): fix linting

* test: update test to include required fields

* test: fix test

* test: handle flaky test

* test: remove e2e test - hitting gemini rate limits

* Litellm dev 10 26 2024 (#6472)

* docs(exception_mapping.md): add missing exception types
  Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key
  Ensure custom model pricing is registered to the specific model+provider key combination

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls
  ensures complete coverage for all redis cache calls

* (Testing) Add unit testing for DualCache - ensure in memory cache is used when expected (#6471)

* test test_dual_cache_get_set

* unit testing for dual cache

* fix async_set_cache_sadd

* test_dual_cache_local_only

* redis otel tracing + async support for latency routing (#6452)

* docs(exception_mapping.md): add missing exception types
  Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key
  Ensure custom model pricing is registered to the specific model+provider key combination

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls
  ensures complete coverage for all redis cache calls

* refactor: pass parent_otel_span for redis caching calls in router
  allows for more observability into what calls are causing latency issues

* test: update tests with new params

* refactor: ensure e2e otel tracing for router

* refactor(router.py): add more otel tracing across router
  catch all latency issues for router requests

* fix: fix linting error

* fix(router.py): fix linting error

* fix: fix test

* test: fix tests

* fix(dual_cache.py): pass ttl to redis cache

* fix: fix param

* fix(dual_cache.py): set default value for parent_otel_span

* fix(transformation.py): support 'response_format' for anthropic calls

* fix(transformation.py): check for cache_control inside 'function' block

* fix: fix linting error

* fix: fix linting errors

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>

---------

Co-authored-by: Krish Dholakia <krrishdholakia@gmail.com>
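Of the changes above, the parallel-tool-call work is the most visible at the API surface. Below is a minimal sketch of how a caller might exercise it through litellm.completion, assuming the OpenAI-style parallel_tool_calls flag is translated to Anthropic's disable_parallel_tool_use as the message describes; the model name and the get_weather tool are illustrative, not taken from this commit.

# Sketch only: exercises the OpenAI-style flag this commit maps to
# Anthropic's disable_parallel_tool_use. Model and tool are illustrative.
import litellm

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool, for illustration
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "Weather in Paris and London?"}],
    tools=tools,
    parallel_tool_calls=False,  # should yield at most one tool call per turn
)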
parent aa4c2827e8
commit cf877c89ec

7 changed files with 129 additions and 9 deletions
@@ -786,7 +786,6 @@ def test_unmapped_vertex_anthropic_model():
    assert "max_retries" not in optional_params


@pytest.mark.parametrize("provider", ["anthropic", "vertex_ai"])
def test_anthropic_parallel_tool_calls(provider):
    optional_params = get_optional_params(
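The diff viewer truncates the new test at the get_optional_params( call. For orientation, here is a plausible continuation: the model name, remaining keyword arguments, and the assertion are assumptions, not part of the diff, and the import of get_optional_params (from litellm.utils) is assumed to already exist in this test file.

# Assumed continuation -- not shown in the hunk above. get_optional_params
# is litellm's translation layer from OpenAI-style kwargs to provider kwargs.
optional_params = get_optional_params(
    model="claude-3-5-sonnet-20241022",  # illustrative model name
    custom_llm_provider=provider,
    parallel_tool_calls=True,
)
# Anthropic and Vertex AI express this as a field on tool_choice rather than
# a top-level flag, so the translated output would be checked roughly as:
assert optional_params["tool_choice"]["disable_parallel_tool_use"] is False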
@@ -25,6 +25,8 @@ import logging
import pytest

import litellm
from litellm._logging import verbose_proxy_logger
from litellm.proxy.management_endpoints.team_endpoints import list_team
from litellm.proxy._types import *
from litellm.proxy.management_endpoints.internal_user_endpoints import (
    new_user,
    user_info,
@@ -421,3 +423,120 @@ async def test_get_users_key_count(prisma_client):
    assert (
        updated_key_count == initial_key_count + 1
    ), f"Expected key count to increase by 1, but got {updated_key_count} (was {initial_key_count})"


async def cleanup_existing_teams(prisma_client):
    all_teams = await prisma_client.db.litellm_teamtable.find_many()
    for team in all_teams:
        await prisma_client.delete_data(team_id_list=[team.team_id], table_name="team")


@pytest.mark.asyncio
async def test_list_teams(prisma_client):
    """
    Tests /team/list endpoint to verify it returns both keys and members_with_roles
    """
    litellm.set_verbose = True
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    await litellm.proxy.proxy_server.prisma_client.connect()

    # Delete all existing teams first
    await cleanup_existing_teams(prisma_client)

    # Create a test team with members
    team_id = f"test_team_{uuid.uuid4()}"
    team_alias = f"test_team_alias_{uuid.uuid4()}"
    test_team = await new_team(
        data=NewTeamRequest(
            team_id=team_id,
            team_alias=team_alias,
            members_with_roles=[
                Member(role="admin", user_id="test_user_1"),
                Member(role="user", user_id="test_user_2"),
            ],
            models=["gpt-4"],
            tpm_limit=1000,
            rpm_limit=1000,
            budget_duration="30d",
            max_budget=1000,
        ),
        http_request=Request(scope={"type": "http"}),
        user_api_key_dict=UserAPIKeyAuth(
            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
        ),
    )

    # Create a key for the team
    test_key = await generate_key_fn(
        data=GenerateKeyRequest(
            team_id=team_id,
            key_alias=f"test_key_{uuid.uuid4()}",
        ),
        user_api_key_dict=UserAPIKeyAuth(
            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
        ),
    )

    # Get team list
    teams = await list_team(
        http_request=Request(scope={"type": "http"}),
        user_api_key_dict=UserAPIKeyAuth(
            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
        ),
        user_id=None,
    )

    print("teams", teams)

    # Find our test team in the response
    test_team_response = None
    for team in teams:
        if team.team_id == team_id:
            test_team_response = team
            break

    assert (
        test_team_response is not None
    ), f"Could not find test team {team_id} in response"

    # Verify members_with_roles
    assert (
        len(test_team_response.members_with_roles) == 3
    ), "Expected 3 members in team"  # 2 members + 1 team admin
    member_roles = {m.role for m in test_team_response.members_with_roles}
    assert "admin" in member_roles, "Expected admin role in members"
    assert "user" in member_roles, "Expected user role in members"

    # Verify all required fields in TeamListResponseObject
    assert (
        test_team_response.team_id == team_id
    ), f"team_id should be expected value {team_id}"
    assert (
        test_team_response.team_alias == team_alias
    ), f"team_alias should be expected value {team_alias}"
    assert test_team_response.spend is not None, "spend should not be None"
    assert (
        test_team_response.max_budget == 1000
    ), "max_budget should be expected value 1000"
    assert test_team_response.models == [
        "gpt-4"
    ], "models should be expected value ['gpt-4']"
    assert (
        test_team_response.tpm_limit == 1000
    ), "tpm_limit should be expected value 1000"
    assert (
        test_team_response.rpm_limit == 1000
    ), "rpm_limit should be expected value 1000"
    assert (
        test_team_response.budget_reset_at is not None
    ), "budget_reset_at should not be None since budget_duration is 30d"

    # Verify keys are returned
    assert len(test_team_response.keys) > 0, "Expected at least one key for team"
    assert any(
        k.team_id == team_id for k in test_team_response.keys
    ), "Expected to find team key in response"

    # Clean up
    await prisma_client.delete_data(team_id_list=[team_id], table_name="team")
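For manual verification, the same endpoint the test calls in-process can be hit over HTTP against a running proxy. The sketch below assumes a default local proxy on port 4000 and reuses the sk-1234 master key from the test fixture; the response is assumed to be a JSON list mirroring what list_team returns.

# Sketch for manual testing -- the /team/list path matches the route
# exercised above; the port and key are assumptions from a local setup.
import requests

resp = requests.get(
    "http://localhost:4000/team/list",
    headers={"Authorization": "Bearer sk-1234"},
)
resp.raise_for_status()
for team in resp.json():
    # Each entry should carry members_with_roles and keys, per the test above.
    print(team["team_id"], len(team["members_with_roles"]), len(team["keys"]))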