forked from phoenix/litellm-mirror

test: replace gpt-3.5-turbo-0613 (deprecated model) (#5794)

parent 4e03e1509f
commit 6051086322

9 changed files with 41 additions and 34 deletions
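The change is mechanical: every pinned "gpt-3.5-turbo-0613" reference becomes the floating "gpt-3.5-turbo" alias, since OpenAI has deprecated the dated snapshot. A minimal sketch of the call pattern the updated tests exercise (the prompt and key handling here are illustrative, not taken from the diff):

    import os
    import litellm

    # Assumes OPENAI_API_KEY is set in the environment, as the tests do via .env.
    response = litellm.completion(
        model="gpt-3.5-turbo",  # was "gpt-3.5-turbo-0613", now deprecated upstream
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)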
@@ -1,3 +1,3 @@
 {
-    "gpt-3.5-turbo-0613": 7.7e-05
+    "gpt-3.5-turbo": 7.7e-05
 }
@@ -71,7 +71,7 @@ async def test_content_policy_exception_openai():
     # this is ony a test - we needed some way to invoke the exception :(
     litellm.set_verbose = True
     response = await litellm.acompletion(
-        model="gpt-3.5-turbo-0613",
+        model="gpt-3.5-turbo",
         stream=True,
         messages=[
             {"role": "user", "content": "Gimme the lyrics to Don't Stop Me Now"}
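The hunk above only swaps the model name; for context, a hedged sketch of how such a streamed call is consumed (the async iteration is assumed from litellm's OpenAI-compatible streaming interface and is not part of the diff):

    import litellm

    async def consume():
        # stream=True makes acompletion return an async iterator of chunks;
        # the test expects the content-policy error to surface while streaming.
        response = await litellm.acompletion(
            model="gpt-3.5-turbo",
            stream=True,
            messages=[{"role": "user", "content": "Gimme the lyrics to Don't Stop Me Now"}],
        )
        async for chunk in response:
            print(chunk.choices[0].delta)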
@@ -1057,9 +1057,9 @@ def test_router_region_pre_call_check(allowed_model_region):
 def test_function_calling():
     model_list = [
         {
-            "model_name": "gpt-3.5-turbo-0613",
+            "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "tpm": 100000,

@@ -1088,7 +1088,7 @@ def test_function_calling():
 
     router = Router(model_list=model_list)
     response = router.completion(
-        model="gpt-3.5-turbo-0613", messages=messages, functions=functions
+        model="gpt-3.5-turbo", messages=messages, functions=functions
     )
     router.reset()
     print(response)

@@ -1104,7 +1104,7 @@ def test_function_calling_on_router():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
         },
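The function-calling tests above pass a functions list defined outside the diff context. A sketch of what an OpenAI-style function schema for this call could look like (the get_current_weather schema is an assumption, not necessarily the test's actual payload):

    functions = [
        {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City and state"}
                },
                "required": ["location"],
            },
        }
    ]
    messages = [{"role": "user", "content": "What is the weather like in Boston?"}]

With those in scope, the router call shown in the hunk, router.completion(model="gpt-3.5-turbo", messages=messages, functions=functions), forwards the schema to the underlying deployment.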
@@ -30,7 +30,7 @@ async def test_router_async_caching_with_ssl_url():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "tpm": 100000,

@@ -57,7 +57,7 @@ def test_router_sync_caching_with_ssl_url():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "tpm": 100000,

@@ -84,7 +84,7 @@ async def test_acompletion_caching_on_router():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "tpm": 100000,

@@ -201,7 +201,7 @@ async def test_acompletion_caching_with_ttl_on_router():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "tpm": 100000,

@@ -266,8 +266,9 @@ async def test_acompletion_caching_on_router_caching_groups():
         {
             "model_name": "openai-gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
+                "mock_response": "Hello world",
             },
             "tpm": 100000,
             "rpm": 10000,
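All five caching tests share the same model_list shape: "model_name" is the alias callers route on, and litellm_params.model is the concrete model actually invoked. A sketch of the router these tests build, assuming Router's cache_responses flag (the flag name is inferred from the test names, not shown in the diff):

    import os
    from litellm import Router

    model_list = [
        {
            "model_name": "gpt-3.5-turbo",  # alias used by callers
            "litellm_params": {
                "model": "gpt-3.5-turbo",  # concrete OpenAI model
                "api_key": os.getenv("OPENAI_API_KEY"),
            },
            "tpm": 100000,
        }
    ]
    router = Router(model_list=model_list, cache_responses=True)  # assumed flag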
@@ -32,7 +32,7 @@ async def test_router_init():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "model_info": {"id": "1234"},
@@ -1,18 +1,24 @@
 # Tests for router.get_available_deployment
 # specifically test if it can pick the correct LLM when rpm/tpm set
 # These are fast Tests, and make no API calls
-import sys, os, time
-import traceback, asyncio
+import asyncio
+import os
+import sys
+import time
+import traceback
+
 import pytest
 
 sys.path.insert(
     0, os.path.abspath("../..")
 )  # Adds the parent directory to the system path
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+
+from dotenv import load_dotenv
+
 import litellm
 from litellm import Router
-from concurrent.futures import ThreadPoolExecutor
-from collections import defaultdict
-from dotenv import load_dotenv
 
 load_dotenv()
 

@@ -27,7 +33,7 @@ def test_weighted_selection_router():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
                 "rpm": 6,
             },

@@ -83,7 +89,7 @@ def test_weighted_selection_router_tpm():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
                 "tpm": 5,
             },

@@ -139,7 +145,7 @@ def test_weighted_selection_router_tpm_as_router_param():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "tpm": 5,

@@ -195,7 +201,7 @@ def test_weighted_selection_router_rpm_as_router_param():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
             },
             "rpm": 5,

@@ -252,7 +258,7 @@ def test_weighted_selection_router_no_rpm_set():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
                 "rpm": 6,
             },

@@ -311,7 +317,7 @@ def test_model_group_aliases():
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
                 "tpm": 1,
             },

@@ -537,7 +543,7 @@ async def test_weighted_selection_router_async(rpm_list, tpm_list):
         {
             "model_name": "gpt-3.5-turbo",
             "litellm_params": {
-                "model": "gpt-3.5-turbo-0613",
+                "model": "gpt-3.5-turbo",
                 "api_key": os.getenv("OPENAI_API_KEY"),
                 "rpm": rpm_list[0],
                 "tpm": tpm_list[0],

@@ -580,7 +586,7 @@ async def test_weighted_selection_router_async(rpm_list, tpm_list):
         else:
             # Assert both are used
             assert selection_counts["azure/chatgpt-v-2"] > 0
-            assert selection_counts["gpt-3.5-turbo-0613"] > 0
+            assert selection_counts["gpt-3.5-turbo"] > 0
         router.reset()
     except Exception as e:
         traceback.print_exc()
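Per this file's header comment, these tests exercise router.get_available_deployment under rpm/tpm weighting without making API calls. A hedged sketch of that selection loop (the Azure deployment params and the exact method signature are assumptions based on the assertions above, not shown in the diff):

    import os
    from collections import defaultdict

    from litellm import Router

    model_list = [
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "api_key": os.getenv("OPENAI_API_KEY"),
                "rpm": 6,
            },
        },
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "azure/chatgpt-v-2",  # deployment name taken from the assertions
                "api_key": os.getenv("AZURE_API_KEY"),
                "api_base": os.getenv("AZURE_API_BASE"),
                "rpm": 1440,  # illustrative: a much higher limit than the OpenAI deployment
            },
        },
    ]

    router = Router(model_list=model_list)
    selection_counts = defaultdict(int)
    for _ in range(100):
        deployment = router.get_available_deployment(model="gpt-3.5-turbo")
        selection_counts[deployment["litellm_params"]["model"]] += 1
    # With rpm weighting, azure/chatgpt-v-2 should dominate, but both should appear.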
@@ -2509,7 +2509,7 @@ final_openai_function_call_example = {
     "id": "chatcmpl-7zVNA4sXUftpIg6W8WlntCyeBj2JY",
     "object": "chat.completion",
     "created": 1694892960,
-    "model": "gpt-3.5-turbo-0613",
+    "model": "gpt-3.5-turbo",
     "choices": [
         {
             "index": 0,

@@ -2573,7 +2573,7 @@ first_openai_function_call_example = {
     "id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
     "object": "chat.completion.chunk",
     "created": 1694893248,
-    "model": "gpt-3.5-turbo-0613",
+    "model": "gpt-3.5-turbo",
     "choices": [
         {
             "index": 0,

@@ -2646,7 +2646,7 @@ second_function_call_chunk_format = {
     "id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
     "object": "chat.completion.chunk",
     "created": 1694893248,
-    "model": "gpt-3.5-turbo-0613",
+    "model": "gpt-3.5-turbo",
     "choices": [
         {
             "index": 0,

@@ -2690,7 +2690,7 @@ final_function_call_chunk_example = {
     "id": "chatcmpl-7zVRoE5HjHYsCMaVSNgOjzdhbS3P0",
     "object": "chat.completion.chunk",
     "created": 1694893248,
-    "model": "gpt-3.5-turbo-0613",
+    "model": "gpt-3.5-turbo",
     "choices": [{"index": 0, "delta": {}, "finish_reason": "function_call"}],
 }
 

@@ -3476,7 +3476,7 @@ def test_unit_test_custom_stream_wrapper_openai():
             )
         ],
         "created": 1721353246,
-        "model": "gpt-3.5-turbo-0613",
+        "model": "gpt-3.5-turbo",
         "object": "chat.completion.chunk",
         "system_fingerprint": None,
         "usage": None,
@@ -598,7 +598,7 @@ def test_get_llm_provider_ft_models():
     All ft prefixed models should map to OpenAI
     gpt-3.5-turbo-0125 (recommended),
     gpt-3.5-turbo-1106,
-    gpt-3.5-turbo-0613,
+    gpt-3.5-turbo,
     gpt-4-0613 (experimental)
     gpt-4o-2024-05-13.
     babbage-002, davinci-002,

@@ -610,13 +610,13 @@ def test_get_llm_provider_ft_models():
     model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo-1106")
     assert custom_llm_provider == "openai"
 
-    model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo-0613")
+    model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo")
     assert custom_llm_provider == "openai"
 
     model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-4-0613")
     assert custom_llm_provider == "openai"
 
-    model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo-0613")
+    model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo")
     assert custom_llm_provider == "openai"
 
     model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-4o-2024-05-13")
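The test unpacks four return values from get_llm_provider but only asserts on the second. A sketch of the provider-mapping check for the renamed fine-tune prefix (the import path is an assumption; the hunk elides the test's own imports):

    from litellm import get_llm_provider  # import path assumed

    model, custom_llm_provider, _, _ = get_llm_provider(model="ft:gpt-3.5-turbo")
    assert custom_llm_provider == "openai"  # ft:-prefixed models map to OpenAI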
@@ -3,7 +3,7 @@
         "total_budget": 10,
         "current_cost": 7.3e-05,
         "model_cost": {
-            "gpt-3.5-turbo-0613": 7.3e-05
+            "gpt-3.5-turbo": 7.3e-05
         }
     },
     "12345": {