# What this tests?
## Tests team-based logging: requests made with a team-scoped key should be
## logged to that team's Langfuse project

import asyncio
import os
import uuid

import aiohttp
import pytest
from dotenv import load_dotenv

load_dotenv()
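

# NOTE: these tests assume a LiteLLM proxy is already running on
# http://0.0.0.0:4000 with master key "sk-1234", and that
# LANGFUSE_PROJECT1_PUBLIC/SECRET and LANGFUSE_PROJECT2_PUBLIC/SECRET
# point at two separate Langfuse projects (one per team).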


async def generate_key(session, models=None, team_id=None):
    url = "http://0.0.0.0:4000/key/generate"
    headers = {"Authorization": "Bearer sk-1234", "Content-Type": "application/json"}
    data = {
        "models": models or [],
        "duration": None,
        "team_id": team_id,
    }

    async with session.post(url, headers=headers, json=data) as response:
        status = response.status
        response_text = await response.text()

        print(response_text)
        print()

        if status != 200:
            raise Exception(f"Request did not return a 200 status code: {status}")
        return await response.json()
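

# Note: the /key/generate response is consumed as JSON; the tests below rely
# only on its "key" field (the generated virtual key). Illustrative shape,
# other fields omitted:
#   {"key": "sk-...", ...}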


async def chat_completion(session, key, model="azure-gpt-3.5", request_metadata=None):
    url = "http://0.0.0.0:4000/chat/completions"
    headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
    }
    data = {
        "model": model,
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Hello!"},
        ],
        "metadata": request_metadata,
    }

    print("data sent in test=", data)

    async with session.post(url, headers=headers, json=data) as response:
        status = response.status
        response_text = await response.text()

        print(response_text)
        print()

        if status != 200:
            raise Exception(f"Request did not return a 200 status code: {status}")
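

# The tests below wait a fixed 10s (plus pytest flaky retries) for Langfuse to
# ingest traces. A polling helper like this sketch could replace the fixed
# sleep; it reuses the same langfuse_client.get_generations(trace_id=...) call
# the tests make. Left here as an illustration only (an assumption, not part
# of the original test flow).
async def _wait_for_generations(langfuse_client, trace_id, timeout=30.0, interval=2.0):
    """Poll Langfuse until generations for `trace_id` appear or `timeout` expires."""
    deadline = asyncio.get_running_loop().time() + timeout
    while asyncio.get_running_loop().time() < deadline:
        generations = langfuse_client.get_generations(trace_id=trace_id).data
        if generations:
            return generations
        await asyncio.sleep(interval)
    return []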


@pytest.mark.asyncio
@pytest.mark.flaky(retries=12, delay=2)
async def test_aaateam_logging():
    """
    -> Team 1 logs to project 1
    -> Create Key
    -> Make chat/completions call
    -> Fetch logs from langfuse
    """
    try:
        async with aiohttp.ClientSession() as session:
            key = await generate_key(
                session, models=["fake-openai-endpoint"], team_id="team-1"
            )  # team-1 logs to project 1

            _trace_id = f"trace-{uuid.uuid4()}"
            _request_metadata = {
                "trace_id": _trace_id,
            }

            await chat_completion(
                session,
                key["key"],
                model="fake-openai-endpoint",
                request_metadata=_request_metadata,
            )

            # Test - if the logs were sent to the correct team on langfuse
            import langfuse

            langfuse_client = langfuse.Langfuse(
                public_key=os.getenv("LANGFUSE_PROJECT1_PUBLIC"),
                secret_key=os.getenv("LANGFUSE_PROJECT1_SECRET"),
            )

            # give Langfuse time to ingest the trace
            await asyncio.sleep(10)

            print(f"searching for trace_id={_trace_id} on langfuse")

            generations = langfuse_client.get_generations(trace_id=_trace_id).data
            print(generations)
            assert len(generations) == 1
    except Exception as e:
        pytest.fail(f"Unexpected error: {str(e)}")


@pytest.mark.skip(reason="todo fix langfuse credential error")
@pytest.mark.asyncio
async def test_team_2logging():
    """
    -> Team 2 logs to project 2
    -> Create Key
    -> Make chat/completions call
    -> Fetch logs from langfuse
    """
    langfuse_public_key = os.getenv("LANGFUSE_PROJECT2_PUBLIC")
    print(f"langfuse_public_key: {langfuse_public_key}")
    langfuse_secret_key = os.getenv("LANGFUSE_PROJECT2_SECRET")
    print(f"langfuse_secret_key: {langfuse_secret_key}")
    langfuse_host = "https://us.cloud.langfuse.com"

    if langfuse_public_key is None or langfuse_secret_key is None:
        # skip test if langfuse credentials are not set
        pytest.skip("LANGFUSE_PROJECT2 credentials are not set")

    try:
        async with aiohttp.ClientSession() as session:
            key = await generate_key(
                session, models=["fake-openai-endpoint"], team_id="team-2"
            )  # team-2 logs to project 2

            _trace_id = f"trace-{uuid.uuid4()}"
            _request_metadata = {
                "trace_id": _trace_id,
            }

            await chat_completion(
                session,
                key["key"],
                model="fake-openai-endpoint",
                request_metadata=_request_metadata,
            )

            # Test - if the logs were sent to the correct team on langfuse
            import langfuse

            langfuse_client = langfuse.Langfuse(
                public_key=langfuse_public_key,
                secret_key=langfuse_secret_key,
                host=langfuse_host,
            )

            # give Langfuse time to ingest the trace
            await asyncio.sleep(10)

            print(f"searching for trace_id={_trace_id} on langfuse")

            generations = langfuse_client.get_generations(trace_id=_trace_id).data
            print("Team 2 generations", generations)

            # team-2 should have 1 generation with this trace id
            assert len(generations) == 1

            # team-1 should have 0 generations with this trace id
            langfuse_client_1 = langfuse.Langfuse(
                public_key=os.getenv("LANGFUSE_PROJECT1_PUBLIC"),
                secret_key=os.getenv("LANGFUSE_PROJECT1_SECRET"),
            )

            generations_team_1 = langfuse_client_1.get_generations(
                trace_id=_trace_id
            ).data
            print("Team 1 generations", generations_team_1)

            assert len(generations_team_1) == 0
    except Exception as e:
        pytest.fail("Team 2 logging failed: " + str(e))
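

# To run these tests against a locally running proxy (hypothetical invocation;
# adjust the file path to your checkout):
#   pytest test_team_logging.py -x -s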