LiteLLM Minor Fixes & Improvements (11/01/2024) (#6551)

* fix: add lm_studio support

* fix(cohere_transformation.py): fix transformation logic for azure cohere embedding model name

Fixes https://github.com/BerriAI/litellm/issues/6540

* fix(utils.py): require base64 str to begin with `data:`

Fixes https://github.com/BerriAI/litellm/issues/6541

* fix: cleanup tests

* docs(guardrails.md): fix typo

* fix(opentelemetry.py): move to `.exception` and update 'response_obj' value to handle 'None' case

Fixes https://github.com/BerriAI/litellm/issues/6510

* fix: fix linting noqa placement
This commit is contained in:
Krish Dholakia 2024-11-02 00:39:31 +04:00 committed by GitHub
parent bac2ac2a49
commit 22b8f93f53
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 123 additions and 17 deletions

View file

@@ -0,0 +1,41 @@
# What is this?
## Unit tests for Azure AI integration
import asyncio
import os
import sys
import traceback
from dotenv import load_dotenv
import litellm.types
import litellm.types.utils
from litellm.llms.anthropic.chat import ModelResponseIterator
load_dotenv()
import io
import os
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
from typing import Optional
from unittest.mock import MagicMock, patch
import pytest
import litellm
@pytest.mark.parametrize(
    "model_group_header, expected_model",
    [
        ("offer-cohere-embed-multili-paygo", "Cohere-embed-v3-multilingual"),
        ("offer-cohere-embed-english-paygo", "Cohere-embed-v3-english"),
    ],
)
def test_map_azure_model_group(model_group_header, expected_model):
    """Each Azure AI model-group header must resolve to the matching
    canonical Cohere embedding model name."""
    from litellm.llms.azure_ai.embed.cohere_transformation import AzureAICohereConfig

    azure_cfg = AzureAICohereConfig()
    mapped_name = azure_cfg._map_azure_model_group(model_group_header)
    assert mapped_name == expected_model

View file

@@ -1905,7 +1905,9 @@ def test_hf_test_completion_tgi():
# hf_test_completion_tgi()
@pytest.mark.parametrize("provider", ["openai", "hosted_vllm"]) # "vertex_ai",
@pytest.mark.parametrize(
"provider", ["openai", "hosted_vllm", "lm_studio"]
) # "vertex_ai",
@pytest.mark.asyncio
async def test_openai_compatible_custom_api_base(provider):
litellm.set_verbose = True
@@ -1931,8 +1933,8 @@ async def test_openai_compatible_custom_api_base(provider):
api_base="my-custom-api-base",
hello="world",
)
except Exception:
pass
except Exception as e:
print(e)
mock_call.assert_called_once()

View file

@@ -194,7 +194,7 @@ def _azure_ai_image_mock_response(*args, **kwargs):
)
],
)
@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.parametrize("sync_mode", [True]) # , False
@pytest.mark.asyncio
async def test_azure_ai_embedding_image(model, api_base, api_key, sync_mode):
try:

View file

@@ -839,7 +839,11 @@ def test_is_base64_encoded():
@mock.patch("httpx.AsyncClient")
@mock.patch.dict(os.environ, {"SSL_VERIFY": "/certificate.pem", "SSL_CERTIFICATE": "/client.pem"}, clear=True)
@mock.patch.dict(
os.environ,
{"SSL_VERIFY": "/certificate.pem", "SSL_CERTIFICATE": "/client.pem"},
clear=True,
)
def test_async_http_handler(mock_async_client):
import httpx
@@ -861,6 +865,7 @@ def test_async_http_handler(mock_async_client):
verify="/certificate.pem",
)
@pytest.mark.parametrize(
"model, expected_bool", [("gpt-3.5-turbo", False), ("gpt-4o-audio-preview", True)]
)
@@ -874,3 +879,15 @@ def test_supports_audio_input(model, expected_bool):
assert supports_pc == expected_bool
def test_is_base64_encoded_2():
    """A string is treated as base64 only when it carries a `data:` URI
    prefix; a bare word must be rejected."""
    from litellm.utils import is_base64_encoded

    data_uri = (
        "data:image/png;base64,"
        "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/x+AAwMCAO+ip1sAAAAASUVORK5CYII="
    )
    assert is_base64_encoded(s=data_uri) is True
    assert is_base64_encoded(s="Dog") is False