litellm/tests/local_testing/test_whisper.py

# What is this?
## Tests the `litellm.transcription` endpoint. Kept outside the litellm module
## because of the audio file used in testing (it's ~700 KB).
import asyncio
import logging
import os
import sys
import time
import traceback
from typing import Optional

import aiohttp
import dotenv
import pytest
from dotenv import load_dotenv
from openai import AsyncOpenAI

load_dotenv()
sys.path.insert(
    0, os.path.abspath("../")
)  # Adds the parent directory to the system path so the local litellm checkout is imported

import litellm
from litellm import Router
from litellm.integrations.custom_logger import CustomLogger

# Resolve the audio fixtures relative to this file's directory
pwd = os.path.dirname(os.path.realpath(__file__))
print(pwd)

file_path = os.path.join(pwd, "gettysburg.wav")
audio_file = open(file_path, "rb")

file2_path = os.path.join(pwd, "eagle.wav")
audio_file2 = open(file2_path, "rb")


@pytest.mark.parametrize(
    "model, api_key, api_base",
    [
        ("whisper-1", None, None),
        # ("groq/whisper-large-v3", None, None),
        (
            "azure/azure-whisper",
            os.getenv("AZURE_EUROPE_API_KEY"),
            "https://my-endpoint-europe-berri-992.openai.azure.com/",
        ),
    ],
)
@pytest.mark.parametrize(
    "response_format, timestamp_granularities",
    [("json", None), ("vtt", None), ("verbose_json", ["word"])],
)
@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.asyncio
async def test_transcription(
    model, api_key, api_base, response_format, sync_mode, timestamp_granularities
):
    if sync_mode:
        transcript = litellm.transcription(
            model=model,
            file=audio_file,
            api_key=api_key,
            api_base=api_base,
            response_format=response_format,
            timestamp_granularities=timestamp_granularities,
            drop_params=True,
        )
    else:
        transcript = await litellm.atranscription(
            model=model,
            file=audio_file,
            api_key=api_key,
            api_base=api_base,
            response_format=response_format,
            timestamp_granularities=timestamp_granularities,  # match the sync branch; drop_params drops it where unsupported
            drop_params=True,
        )

    print(f"transcript: {transcript.model_dump()}")
    print(f"transcript: {transcript._hidden_params}")
    assert transcript.text is not None
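
# A hedged sketch (not executed by pytest) of what the verbose_json + ["word"]
# case above is exercising: per the OpenAI transcription schema, word-level
# timestamps are expected to surface on a `words` attribute of the response.
# The field names here are an assumption based on that schema, not something
# this test asserts.
#
#     transcript = litellm.transcription(
#         model="whisper-1",
#         file=open(file_path, "rb"),
#         response_format="verbose_json",
#         timestamp_granularities=["word"],
#     )
#     for entry in transcript.words:  # each entry carries word, start, end
#         print(entry)
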
@pytest.mark.asyncio()
async def test_transcription_caching():
    import litellm
    from litellm.caching.caching import Cache

    litellm.set_verbose = True
    litellm.cache = Cache()

    # make raw llm api call
    response_1 = await litellm.atranscription(
        model="whisper-1",
        file=audio_file,
    )

    await asyncio.sleep(5)  # give the async cache write time to complete

    # cache hit - same model + same file should be served from the cache
    response_2 = await litellm.atranscription(
        model="whisper-1",
        file=audio_file,
    )

    print("response_1", response_1)
    print("response_2", response_2)
    print("response2 hidden params", response_2._hidden_params)
    assert response_2._hidden_params["cache_hit"] is True

    # cache miss - a different audio file must not reuse the cached result
    response_3 = await litellm.atranscription(
        model="whisper-1",
        file=audio_file2,
    )
    print("response_3", response_3)
    print("response3 hidden params", response_3._hidden_params)
    assert response_3._hidden_params.get("cache_hit") is not True
    assert response_3.text != response_2.text

    litellm.cache = None  # reset the global cache so later tests are unaffected
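
# A hedged sketch (assumes a reachable Redis and the standard REDIS_* env vars):
# the same transcription-caching flow works with a shared Redis cache instead of
# the default in-process cache, e.g.
#
#     litellm.cache = Cache(
#         type="redis",
#         host=os.environ["REDIS_HOST"],
#         port=os.environ["REDIS_PORT"],
#         password=os.environ["REDIS_PASSWORD"],
#     )
#
# after which the cache-hit assertion above would hold across any processes
# pointed at that Redis instance.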