(UI) Fix viewing members and keys in a team + add testing (#6514)

* fix listing teams on ui

* LiteLLM Minor Fixes & Improvements (10/28/2024) (#6475)

* fix(anthropic/chat/transformation.py): support anthropic disable_parallel_tool_use param

Fixes https://github.com/BerriAI/litellm/issues/6456
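
A minimal usage sketch (the model name and tool definition below are placeholders; the assumption here is that the OpenAI-style `parallel_tool_calls=False` kwarg is what maps to Anthropic's `disable_parallel_tool_use`):

```python
import litellm

# hypothetical tool definition, for illustration only
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]

response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "Weather in Paris and Berlin?"}],
    tools=tools,
    parallel_tool_calls=False,  # assumed to map to disable_parallel_tool_use
)
```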

* feat(anthropic/chat/transformation.py): support anthropic computer tool use

Closes https://github.com/BerriAI/litellm/issues/6427
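
For reference, a hedged sketch of a computer-use request (tool schema modeled on Anthropic's docs; display dimensions and model name are illustrative):

```python
import litellm

response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "Take a screenshot of the screen."}],
    tools=[
        {
            "type": "computer_20241022",  # Anthropic-native computer tool block
            "name": "computer",
            "display_width_px": 1024,   # illustrative values
            "display_height_px": 768,
            "display_number": 1,
        }
    ],
)
```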

* fix(vertex_ai/common_utils.py): parse out '$schema' when calling vertex ai

Fixes issue when trying to call vertex from vercel sdk
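
Roughly, the sanitization involved looks like the following (a sketch, not the actual helper in common_utils.py):

```python
def strip_schema_field(schema: dict) -> dict:
    # Vertex AI rejects the "$schema" key that the Vercel AI SDK adds to
    # generated JSON schemas, so drop it at every nesting level
    cleaned = {k: v for k, v in schema.items() if k != "$schema"}
    for key, value in cleaned.items():
        if isinstance(value, dict):
            cleaned[key] = strip_schema_field(value)
        elif isinstance(value, list):
            cleaned[key] = [
                strip_schema_field(v) if isinstance(v, dict) else v for v in value
            ]
    return cleaned
```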

* fix(main.py): add 'extra_headers' support for azure on all translation endpoints

Fixes https://github.com/BerriAI/litellm/issues/6465
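
Example of the now-supported pattern on a non-chat endpoint (deployment name, endpoint URL, and header are placeholders):

```python
import litellm

response = litellm.embedding(
    model="azure/my-embedding-deployment",
    input=["hello world"],
    api_base="https://my-endpoint.openai.azure.com",
    api_key="sk-placeholder",
    extra_headers={"X-Correlation-Id": "abc-123"},  # forwarded to Azure
)
```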

* fix: fix linting errors

* fix(transformation.py): handle no beta headers for anthropic

* test: cleanup test

* fix: fix linting error

* fix: fix linting errors

* fix: fix linting errors

* fix(transformation.py): handle dummy tool call

* fix(main.py): fix linting error

* fix(azure.py): pass required param

* LiteLLM Minor Fixes & Improvements (10/24/2024) (#6441)

* fix(azure.py): handle /openai/deployment in azure api base
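
i.e. an api_base that already carries the deployment route should now be handled (URL is a placeholder; the exact prior failure mode is assumed):

```python
import litellm

response = litellm.completion(
    model="azure/my-deployment",
    messages=[{"role": "user", "content": "hi"}],
    # api_base that already includes /openai/deployments/... is now handled
    api_base="https://my-endpoint.openai.azure.com/openai/deployments/my-deployment",
    api_key="sk-placeholder",
)
```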

* fix(factory.py): fix faulty anthropic tool result translation check

Fixes https://github.com/BerriAI/litellm/issues/6422
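
For context, the check in question guards the translation of OpenAI-style tool messages into Anthropic tool_result blocks; the shapes involved are roughly as follows (illustrative, not the exact factory.py output):

```python
# OpenAI-format tool result message (input side of the translation)
openai_tool_message = {
    "role": "tool",
    "tool_call_id": "call_abc123",  # placeholder id
    "content": '{"temperature": 21}',
}

# Anthropic-format block it is translated into
anthropic_tool_result = {
    "role": "user",
    "content": [
        {
            "type": "tool_result",
            "tool_use_id": "call_abc123",
            "content": '{"temperature": 21}',
        }
    ],
}
```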

* fix(gpt_transformation.py): add support for parallel_tool_calls to azure

Fixes https://github.com/BerriAI/litellm/issues/6440
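
Sketch of the flag passing through for Azure (deployment name and tool definition are placeholders):

```python
import litellm

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}]

response = litellm.completion(
    model="azure/my-gpt-4o-deployment",  # placeholder deployment name
    messages=[{"role": "user", "content": "Weather in Paris and Berlin?"}],
    tools=tools,
    parallel_tool_calls=True,  # now forwarded to Azure
)
```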

* fix(factory.py): support anthropic prompt caching for tool results
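
A sketch of the now-cacheable message shape (assuming the OpenAI-format cache_control marker shown here is what the factory translates into Anthropic's cache_control block):

```python
messages = [
    {"role": "user", "content": "What's the weather in Paris?"},
    # ... assistant turn with the tool call elided ...
    {
        "role": "tool",
        "tool_call_id": "call_abc123",  # placeholder id
        "content": '{"temperature": 21}',
        "cache_control": {"type": "ephemeral"},  # tool result is now cacheable
    },
]
```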

* fix(vertex_ai/common_utils): don't pop non-null required field

Fixes https://github.com/BerriAI/litellm/issues/6426
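
i.e. a populated `required` list on a tool's schema must survive the Vertex schema sanitization:

```python
parameters = {
    "type": "object",
    "properties": {"city": {"type": "string"}},
    "required": ["city"],  # non-null, so it must not be popped
}
```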

* feat(vertex_ai.py): support code_execution tool call for vertex ai + gemini

Closes https://github.com/BerriAI/litellm/issues/6434
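
A hedged sketch of the call shape (the exact tool-block format LiteLLM expects here is an assumption, modeled on the Gemini REST API):

```python
import litellm

response = litellm.completion(
    model="vertex_ai/gemini-1.5-pro",
    messages=[{"role": "user", "content": "Compute the 20th Fibonacci number."}],
    tools=[{"code_execution": {}}],  # Gemini-native code-execution tool block
)
```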

* build(model_prices_and_context_window.json): Add 'supports_assistant_prefill' for bedrock claude-3-5-sonnet v2 models

Closes https://github.com/BerriAI/litellm/issues/6437
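
This should now be queryable via the model-info helper (the Bedrock model id shown is illustrative):

```python
import litellm

info = litellm.get_model_info("bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
assert info.get("supports_assistant_prefill") is True  # expected after this change
```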

* fix(types/utils.py): fix linting

* test: update test to include required fields

* test: fix test

* test: handle flaky test

* test: remove e2e test - hitting gemini rate limits

* Litellm dev 10 26 2024 (#6472)

* docs(exception_mapping.md): add missing exception types

Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key

Ensure custom model pricing is registered to the specific model+provider key combination
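
A sketch of the registration this fixes (model name and prices are placeholders):

```python
import litellm

litellm.register_model(
    {
        # keyed as provider/model so the pricing doesn't bleed onto another
        # provider's model that shares the short name
        "openai/my-custom-model": {
            "input_cost_per_token": 0.0000006,
            "output_cost_per_token": 0.0000012,
            "litellm_provider": "openai",
            "mode": "chat",
        }
    }
)
```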

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls

ensures complete coverage for all redis cache calls

* (Testing) Add unit testing for DualCache - ensure in memory cache is used when expected  (#6471)

* test test_dual_cache_get_set

* unit testing for dual cache

* fix async_set_cache_sadd

* test_dual_cache_local_only
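
A condensed sketch of what these tests assert (API per litellm.caching; the exact test bodies live in the PR):

```python
from litellm.caching import DualCache, InMemoryCache


def test_dual_cache_prefers_in_memory():
    in_memory = InMemoryCache()
    # no redis layer configured -> reads must come from the in-memory cache
    dual_cache = DualCache(in_memory_cache=in_memory, redis_cache=None)

    dual_cache.set_cache(key="foo", value="bar")
    assert dual_cache.get_cache(key="foo") == "bar"
    assert in_memory.get_cache(key="foo") == "bar"
```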

* redis otel tracing + async support for latency routing (#6452)

* docs(exception_mapping.md): add missing exception types

Fixes https://github.com/Aider-AI/aider/issues/2120#issuecomment-2438971183

* fix(main.py): register custom model pricing with specific key

Ensure custom model pricing is registered to the specific model+provider key combination

* test: make testing more robust for custom pricing

* fix(redis_cache.py): instrument otel logging for sync redis calls

ensures complete coverage for all redis cache calls

* refactor: pass parent_otel_span for redis caching calls in router

allows for more observability into what calls are causing latency issues

* test: update tests with new params

* refactor: ensure e2e otel tracing for router

* refactor(router.py): add more otel tracing across router

catch all latency issues for router requests

* fix: fix linting error

* fix(router.py): fix linting error

* fix: fix test

* test: fix tests

* fix(dual_cache.py): pass ttl to redis cache

* fix: fix param

* fix(dual_cache.py): set default value for parent_otel_span

* fix(transformation.py): support 'response_format' for anthropic calls
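
Usage sketch (model name is a placeholder):

```python
import litellm

response = litellm.completion(
    model="anthropic/claude-3-5-sonnet-20241022",
    messages=[{"role": "user", "content": "List three prime numbers as JSON."}],
    response_format={"type": "json_object"},  # now honored for Anthropic models
)
```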

* fix(transformation.py): check for cache_control inside 'function' block
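
i.e. a cache_control marker nested in the `function` block is now detected (shape illustrative):

```python
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
            },
            # previously only a top-level cache_control key was picked up
            "cache_control": {"type": "ephemeral"},
        },
    }
]
```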

* fix: fix linting error

* fix: fix linting errors

---------

Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>

---------

Co-authored-by: Krish Dholakia <krrishdholakia@gmail.com>
Ishaan Jaff 2024-10-30 23:51:13 +05:30 committed by GitHub
parent aa4c2827e8
commit cf877c89ec
7 changed files with 129 additions and 9 deletions


@@ -2033,6 +2033,7 @@ class TeamInfoResponseObject(TypedDict):
 class TeamListResponseObject(LiteLLM_TeamTable):
     team_memberships: List[LiteLLM_TeamMembership]
+    keys: List  # list of keys that belong to the team


 class CurrentItemRateLimit(TypedDict):


@@ -1275,10 +1275,17 @@ async def list_team(
         for tm in returned_tm:
             if tm.team_id == team.team_id:
                 _team_memberships.append(tm)
+
+        # add all keys that belong to the team
+        keys = await prisma_client.db.litellm_verificationtoken.find_many(
+            where={"team_id": team.team_id}
+        )
+
         returned_responses.append(
             TeamListResponseObject(
                 **team.model_dump(),
                 team_memberships=_team_memberships,
+                keys=keys,
             )
         )


@@ -5204,7 +5204,6 @@ class Router:
                 parent_otel_span=parent_otel_span,
             )
             raise exception
-
         verbose_router_logger.info(
             f"get_available_deployment for model: {model}, Selected deployment: {self.print_deployment(deployment)} for model: {model}"
         )


@@ -64,7 +64,6 @@ async def send_llm_exception_alert(
     )
-

 async def async_raise_no_deployment_exception(
     litellm_router_instance: LitellmRouter, model: str, parent_otel_span: Optional[Span]
 ):
@@ -74,7 +73,6 @@ async def async_raise_no_deployment_exception(
     verbose_router_logger.info(
         f"get_available_deployment for model: {model}, No deployment available"
     )
-
     model_ids = litellm_router_instance.get_model_ids(model_name=model)
     _cooldown_time = litellm_router_instance.cooldown_cache.get_min_cooldown(
         model_ids=model_ids, parent_otel_span=parent_otel_span


@@ -786,7 +786,6 @@ def test_unmapped_vertex_anthropic_model():
     assert "max_retries" not in optional_params
-

 @pytest.mark.parametrize("provider", ["anthropic", "vertex_ai"])
 def test_anthropic_parallel_tool_calls(provider):
     optional_params = get_optional_params(


@@ -25,6 +25,8 @@ import logging
 import pytest
 import litellm
 from litellm._logging import verbose_proxy_logger
+from litellm.proxy.management_endpoints.team_endpoints import list_team
+from litellm.proxy._types import *
 from litellm.proxy.management_endpoints.internal_user_endpoints import (
     new_user,
     user_info,
@@ -421,3 +423,120 @@ async def test_get_users_key_count(prisma_client):
     assert (
         updated_key_count == initial_key_count + 1
     ), f"Expected key count to increase by 1, but got {updated_key_count} (was {initial_key_count})"
+
+
+async def cleanup_existing_teams(prisma_client):
+    all_teams = await prisma_client.db.litellm_teamtable.find_many()
+    for team in all_teams:
+        await prisma_client.delete_data(team_id_list=[team.team_id], table_name="team")
+
+
+@pytest.mark.asyncio
+async def test_list_teams(prisma_client):
+    """
+    Tests /team/list endpoint to verify it returns both keys and members_with_roles
+    """
+    litellm.set_verbose = True
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    await litellm.proxy.proxy_server.prisma_client.connect()
+
+    # Delete all existing teams first
+    await cleanup_existing_teams(prisma_client)
+
+    # Create a test team with members
+    team_id = f"test_team_{uuid.uuid4()}"
+    team_alias = f"test_team_alias_{uuid.uuid4()}"
+    test_team = await new_team(
+        data=NewTeamRequest(
+            team_id=team_id,
+            team_alias=team_alias,
+            members_with_roles=[
+                Member(role="admin", user_id="test_user_1"),
+                Member(role="user", user_id="test_user_2"),
+            ],
+            models=["gpt-4"],
+            tpm_limit=1000,
+            rpm_limit=1000,
+            budget_duration="30d",
+            max_budget=1000,
+        ),
+        http_request=Request(scope={"type": "http"}),
+        user_api_key_dict=UserAPIKeyAuth(
+            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
+        ),
+    )
+
+    # Create a key for the team
+    test_key = await generate_key_fn(
+        data=GenerateKeyRequest(
+            team_id=team_id,
+            key_alias=f"test_key_{uuid.uuid4()}",
+        ),
+        user_api_key_dict=UserAPIKeyAuth(
+            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
+        ),
+    )
+
+    # Get team list
+    teams = await list_team(
+        http_request=Request(scope={"type": "http"}),
+        user_api_key_dict=UserAPIKeyAuth(
+            user_role=LitellmUserRoles.PROXY_ADMIN, api_key="sk-1234", user_id="admin"
+        ),
+        user_id=None,
+    )
+    print("teams", teams)
+
+    # Find our test team in the response
+    test_team_response = None
+    for team in teams:
+        if team.team_id == team_id:
+            test_team_response = team
+            break
+
+    assert (
+        test_team_response is not None
+    ), f"Could not find test team {team_id} in response"
+
+    # Verify members_with_roles
+    assert (
+        len(test_team_response.members_with_roles) == 3
+    ), "Expected 3 members in team"  # 2 members + 1 team admin
+    member_roles = {m.role for m in test_team_response.members_with_roles}
+    assert "admin" in member_roles, "Expected admin role in members"
+    assert "user" in member_roles, "Expected user role in members"
+
+    # Verify all required fields in TeamListResponseObject
+    assert (
+        test_team_response.team_id == team_id
+    ), f"team_id should be expected value {team_id}"
+    assert (
+        test_team_response.team_alias == team_alias
+    ), f"team_alias should be expected value {team_alias}"
+    assert test_team_response.spend is not None, "spend should not be None"
+    assert (
+        test_team_response.max_budget == 1000
+    ), "max_budget should be expected value 1000"
+    assert test_team_response.models == [
+        "gpt-4"
+    ], "models should be expected value ['gpt-4']"
+    assert (
+        test_team_response.tpm_limit == 1000
+    ), "tpm_limit should be expected value 1000"
+    assert (
+        test_team_response.rpm_limit == 1000
+    ), "rpm_limit should be expected value 1000"
+    assert (
+        test_team_response.budget_reset_at is not None
+    ), "budget_reset_at should not be None since budget_duration is 30d"
+
+    # Verify keys are returned
+    assert len(test_team_response.keys) > 0, "Expected at least one key for team"
+    assert any(
+        k.team_id == team_id for k in test_team_response.keys
+    ), "Expected to find team key in response"
+
+    # Clean up
+    await prisma_client.delete_data(team_id_list=[team_id], table_name="team")


@@ -551,11 +551,8 @@ const Team: React.FC<TeamProps> = ({
                         {perTeamInfo &&
                           team.team_id &&
                           perTeamInfo[team.team_id] &&
-                          perTeamInfo[team.team_id].team_info &&
-                          perTeamInfo[team.team_id].team_info
-                            .members_with_roles &&
-                          perTeamInfo[team.team_id].team_info
-                            .members_with_roles.length}{" "}
+                          perTeamInfo[team.team_id].members_with_roles &&
+                          perTeamInfo[team.team_id].members_with_roles.length}{" "}
                           Members
                         </Text>
                       </TableCell>