# Test the following scenarios:
# 1. Token counting for a vLLM model reached through a router alias (Hugging Face tokenizer)
# 2. Token counting for a model that is not in the router's model_list (falls back to the OpenAI tokenizer)
# 3. Token counting for gpt-4 (OpenAI tokenizer)

import sys, os
import traceback
from dotenv import load_dotenv
from fastapi import Request
from datetime import datetime

load_dotenv()
import io, time

# this file tests the litellm proxy's token counting endpoint
sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest, logging, asyncio
import litellm
from litellm.proxy.proxy_server import token_counter
from litellm.proxy.utils import PrismaClient, ProxyLogging, hash_token, update_spend
from litellm._logging import verbose_proxy_logger

verbose_proxy_logger.setLevel(level=logging.DEBUG)

from litellm.proxy._types import TokenCountRequest, TokenCountResponse

from litellm import Router
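
# Each test below builds an in-memory Router, attaches it to the
# litellm.proxy.proxy_server module via setattr (the same global the running
# proxy reads), and then awaits the token_counter coroutine directly rather
# than going through HTTP.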
@pytest.mark.asyncio
async def test_vLLM_token_counting():
    """
    Test Token counter for vLLM models
    - User passes model="special-alias"
    - token_counter should infer that "special-alias" maps to wolfram/miquliz-120b-v2.0
    - token counter should therefore use the Hugging Face tokenizer
    """
    llm_router = Router(
        model_list=[
            {
                "model_name": "special-alias",
                "litellm_params": {
                    "model": "openai/wolfram/miquliz-120b-v2.0",
                    "api_base": "https://exampleopenaiendpoint-production.up.railway.app/",
                },
            }
        ]
    )

    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="special-alias",
            messages=[{"role": "user", "content": "hello"}],
        )
    )

    print("response: ", response)

    assert (
        response.tokenizer_type == "huggingface_tokenizer"
    )  # SHOULD use the hugging face tokenizer
    assert response.model_used == "wolfram/miquliz-120b-v2.0"


@pytest.mark.asyncio
async def test_token_counting_model_not_in_model_list():
    """
    Test Token counter - when a model is not in model_list
    -> should use the default OpenAI tokenizer
    """
    llm_router = Router(
        model_list=[
            {
                "model_name": "gpt-4",
                "litellm_params": {
                    "model": "gpt-4",
                },
            }
        ]
    )

    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="special-alias",
            messages=[{"role": "user", "content": "hello"}],
        )
    )

    print("response: ", response)

    assert (
        response.tokenizer_type == "openai_tokenizer"
    )  # SHOULD use the OpenAI tokenizer
    assert response.model_used == "special-alias"


@pytest.mark.asyncio
async def test_gpt_token_counting():
    """
    Test Token counter
    -> should work for gpt-4
    """
    llm_router = Router(
        model_list=[
            {
                "model_name": "gpt-4",
                "litellm_params": {
                    "model": "gpt-4",
                },
            }
        ]
    )

    setattr(litellm.proxy.proxy_server, "llm_router", llm_router)

    response = await token_counter(
        request=TokenCountRequest(
            model="gpt-4",
            messages=[{"role": "user", "content": "hello"}],
        )
    )

    print("response: ", response)

    assert (
        response.tokenizer_type == "openai_tokenizer"
    )  # SHOULD use the OpenAI tokenizer
    assert response.request_model == "gpt-4"
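

def _example_proxy_token_counter_call():
    """
    Illustrative sketch, not a test (pytest will not collect it): how a client
    might exercise the same token-counting logic over HTTP against a running
    proxy. The "/utils/token_counter" path, the local base URL, and the bearer
    key are assumptions for illustration; adjust them to match your proxy
    configuration.
    """
    import requests  # local import so the test module itself does not require it

    resp = requests.post(
        "http://0.0.0.0:4000/utils/token_counter",  # assumed local proxy address
        headers={"Authorization": "Bearer sk-1234"},  # assumed master key
        json={
            "model": "special-alias",
            "messages": [{"role": "user", "content": "hello"}],
        },
    )
    # The JSON body mirrors the fields asserted on above, e.g. tokenizer_type and model_used.
    print(resp.json())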