forked from phoenix/litellm-mirror
Litellm dev 11 11 2024 (#6693)
* fix(__init__.py): add 'watsonx_text' as mapped llm api route Fixes https://github.com/BerriAI/litellm/issues/6663 * fix(opentelemetry.py): fix passing parallel tool calls to otel Fixes https://github.com/BerriAI/litellm/issues/6677 * refactor(test_opentelemetry_unit_tests.py): create a base set of unit tests for all logging integrations - test for parallel tool call handling reduces bugs in repo * fix(__init__.py): update provider-model mapping to include all known provider-model mappings Fixes https://github.com/BerriAI/litellm/issues/6669 * feat(anthropic): support passing document in llm api call * docs(anthropic.md): add pdf anthropic call to docs + expose new 'supports_pdf_input' function * fix(factory.py): fix linting error
This commit is contained in:
parent
b8ae08b8eb
commit
f59cb46e71
21 changed files with 533 additions and 2264 deletions
|
@@ -169,3 +169,11 @@ def test_get_llm_provider_hosted_vllm():
|
|||
assert custom_llm_provider == "hosted_vllm"
|
||||
assert model == "llama-3.1-70b-instruct"
|
||||
assert dynamic_api_key == ""
|
||||
|
||||
|
||||
def test_get_llm_provider_watson_text():
    """A 'watsonx_text/<model>' string resolves to the 'watsonx_text' provider with the prefix stripped."""
    result = litellm.get_llm_provider(model="watsonx_text/watson-text-to-speech")
    resolved_model, provider, _dynamic_api_key, _api_base = result

    assert provider == "watsonx_text"
    assert resolved_model == "watson-text-to-speech"
|
||||
|
|
|
@@ -1,11 +0,0 @@
|
|||
"""Tiny smoke script: print litellm's model list (twice, exactly as originally written)."""
import os
import sys
import traceback

# Make the repository root importable when this script is run from the test tree.
sys.path.insert(0, os.path.abspath("../.."))

import litellm
from litellm import get_model_list

# The original script printed the list twice; behavior is preserved.
print(get_model_list())
print(get_model_list())
# print(litellm.model_list)
|
|
@@ -1,41 +0,0 @@
|
|||
# What is this?
|
||||
## Unit tests for opentelemetry integration
|
||||
|
||||
# What is this?
|
||||
## Unit test for presidio pii masking
|
||||
import sys, os, asyncio, time, random
|
||||
from datetime import datetime
|
||||
import traceback
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
import os
|
||||
import asyncio
|
||||
|
||||
sys.path.insert(
|
||||
0, os.path.abspath("../..")
|
||||
) # Adds the parent directory to the system path
|
||||
import pytest
|
||||
import litellm
|
||||
from unittest.mock import patch, MagicMock, AsyncMock
|
||||
|
||||
|
||||
@pytest.mark.asyncio
async def test_opentelemetry_integration():
    """Confirm that the parent OTEL span passed via request metadata is ended after completion."""
    span = MagicMock()
    litellm.callbacks = ["otel"]

    request_kwargs = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello, world!"}],
        "mock_response": "Hey!",
        "metadata": {"litellm_parent_otel_span": span},
    }
    await litellm.acompletion(**request_kwargs)

    # Give the async logging callback time to run before asserting on the span.
    await asyncio.sleep(1)

    span.end.assert_called_once()
|
|
@@ -943,3 +943,24 @@ def test_validate_chat_completion_user_messages(messages, expected_bool):
|
|||
## Invalid message
|
||||
with pytest.raises(Exception):
|
||||
validate_chat_completion_user_messages(messages=messages)
|
||||
|
||||
|
||||
def test_models_by_provider():
    """
    Every provider that appears in litellm.model_cost must also be a key of
    litellm.models_by_provider.
    """
    from litellm import models_by_provider

    def _should_check(entry_key, provider_name):
        # Skip the spec template entry, sagemaker, and provider strings that
        # contain both '_' and '-' (same exclusions as the original loop).
        if entry_key == "sample_spec":
            return False
        if provider_name == "sagemaker":
            return False
        return not ("_" in provider_name and "-" in provider_name)

    seen_providers = {
        info["litellm_provider"]
        for entry_key, info in litellm.model_cost.items()
        if _should_check(entry_key, info["litellm_provider"])
    }

    for provider_name in seen_providers:
        assert provider_name in models_by_provider
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue