refactor: fix imports

Krrish Dholakia 2024-09-28 13:28:01 -07:00
parent 3560f0ef2c
commit 5ad01e59f6
8 changed files with 19 additions and 15 deletions
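
The change is the same mechanical rewrite in each of the eight files: test helpers that were imported through the installed package path litellm.tests.* are now imported from the repository-level tests.local_testing package. A minimal before/after sketch of the pattern, using an import that actually appears in the hunks below (these lines only resolve inside the litellm repository, with the sys.path setup shown in the first hunk; the changed files themselves are not named in this view):

# Before: helper resolved through the installed litellm package
from litellm.tests.test_streaming import streaming_format_tests

# After: helper resolved through the in-repo tests package
from tests.local_testing.test_streaming import streaming_format_tests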

View file

@@ -8,6 +8,8 @@ load_dotenv()
 import io
 import os
+from tests.local_testing.test_streaming import streaming_format_tests
 sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
@@ -32,7 +34,7 @@ from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_stu
     _gemini_convert_messages_with_history,
 )
 from litellm.llms.vertex_ai_and_google_ai_studio.vertex_llm_base import VertexBase
-from litellm.tests.test_streaming import streaming_format_tests
 litellm.num_retries = 3
 litellm.cache = None
@@ -931,7 +933,7 @@ async def test_gemini_pro_function_calling_httpx(model, sync_mode):
         pytest.fail("An unexpected exception occurred - {}".format(str(e)))
-from litellm.tests.test_completion import response_format_tests
+from tests.local_testing.test_completion import response_format_tests
 @pytest.mark.parametrize(
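
For the new tests.local_testing.* imports to resolve, the repository root (the directory containing the top-level tests/ package) must be on sys.path, which is what the pre-existing sys.path.insert(0, os.path.abspath("../..")) lines in these files provide. A small sketch of the mechanism, assuming the test file sits two levels below the repo root (presumably tests/local_testing/); note that os.path.abspath resolves against the current working directory, so this relies on the tests being run from inside that directory:

import os
import sys

# Put the repo root ahead of everything else so the in-repo `tests`
# package is found before any installed distribution.
sys.path.insert(0, os.path.abspath("../.."))

from tests.local_testing.test_streaming import streaming_format_tests  # noqa: E402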

View file

@@ -9,6 +9,8 @@ load_dotenv()
 import io
 import os
+from tests.local_testing.test_streaming import streaming_format_tests
 sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
@@ -374,8 +376,6 @@ async def test_anthropic_api_prompt_caching_no_headers():
 @pytest.mark.asyncio()
 @pytest.mark.flaky(retries=3, delay=1)
 async def test_anthropic_api_prompt_caching_streaming():
-    from litellm.tests.test_streaming import streaming_format_tests
     response = await litellm.acompletion(
         model="anthropic/claude-3-5-sonnet-20240620",
         messages=[

View file

@@ -829,7 +829,7 @@ def test_vertex_ai_embedding_completion_cost(caplog):
 # """
 # Relevant issue - https://github.com/BerriAI/litellm/issues/4630
 # """
-# from litellm.tests.test_amazing_vertex_completion import load_vertex_ai_credentials
+# from tests.local_testing.test_amazing_vertex_completion import load_vertex_ai_credentials
 # load_vertex_ai_credentials()
 # os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"

View file

@@ -235,7 +235,7 @@ async def test_team_disable_guardrails(mock_acompletion, client_no_auth):
         assert e.code == str(403)
-from litellm.tests.test_custom_callback_input import CompletionCustomHandler
+from tests.local_testing.test_custom_callback_input import CompletionCustomHandler
 @mock_patch_acompletion()
@@ -815,7 +815,7 @@ from litellm.proxy._types import (
 )
 from litellm.proxy.management_endpoints.internal_user_endpoints import new_user
 from litellm.proxy.management_endpoints.team_endpoints import team_member_add
-from litellm.tests.test_key_generate_prisma import prisma_client
+from tests.local_testing.test_key_generate_prisma import prisma_client
 @pytest.mark.parametrize(

View file

@@ -9,6 +9,8 @@ load_dotenv()
 import io
 import os
+from tests.local_testing.test_streaming import streaming_format_tests
 sys.path.insert(
     0, os.path.abspath("../..")
 ) # Adds the parent directory to the system path
@@ -130,8 +132,6 @@ async def test_completion_sagemaker_messages_api(sync_mode):
 @pytest.mark.flaky(retries=3, delay=1)
 async def test_completion_sagemaker_stream(sync_mode, model):
     try:
-        from litellm.tests.test_streaming import streaming_format_tests
         litellm.set_verbose = False
         print("testing sagemaker")
         verbose_logger.setLevel(logging.DEBUG)
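
Several files (the hunks above touching the gemini, anthropic prompt-caching, and sagemaker tests) pair the path rewrite with a small consolidation: streaming_format_tests is imported once at module scope and the duplicated function-local imports are deleted. Hoisting an import this way is behavior-preserving, because module-scope and function-local imports bind the same cached module object; a minimal self-contained illustration (using the stdlib json module as a stand-in):

import sys


def local_import():
    # Function-local import: executed on every call, but served
    # from the sys.modules cache after the first time.
    import json
    return json


import json as hoisted  # module-scope import: executed once

assert local_import() is hoisted is sys.modules["json"]
print("function-local and module-scope imports yield the same module object")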

View file

@@ -1145,7 +1145,9 @@ def test_completion_claude_stream_bad_key():
 @pytest.mark.parametrize("provider", ["vertex_ai_beta"]) # ""
 def test_vertex_ai_stream(provider):
-    from litellm.tests.test_amazing_vertex_completion import load_vertex_ai_credentials
+    from tests.local_testing.test_amazing_vertex_completion import (
+        load_vertex_ai_credentials,
+    )
     load_vertex_ai_credentials()
     litellm.set_verbose = True
@@ -3949,7 +3951,7 @@ def test_unit_test_perplexity_citations_chunk():
 @pytest.mark.flaky(retries=3, delay=1)
 def test_streaming_tool_calls_valid_json_str(model):
     if "vertex_ai" in model:
-        from litellm.tests.test_amazing_vertex_completion import (
+        from tests.local_testing.test_amazing_vertex_completion import (
             load_vertex_ai_credentials,
         )

View file

@@ -4114,7 +4114,7 @@ async def test_async_text_completion_chat_model_stream():
 async def test_completion_codestral_fim_api(model):
     try:
         if model == "vertex_ai/codestral@2405":
-            from litellm.tests.test_amazing_vertex_completion import (
+            from tests.local_testing.test_amazing_vertex_completion import (
                 load_vertex_ai_credentials,
             )
@@ -4158,7 +4158,7 @@ async def test_completion_codestral_fim_api(model):
 async def test_completion_codestral_fim_api_stream(model):
     try:
         if model == "vertex_ai/codestral@2405":
-            from litellm.tests.test_amazing_vertex_completion import (
+            from tests.local_testing.test_amazing_vertex_completion import (
                 load_vertex_ai_credentials,
             )

View file

@@ -21,8 +21,8 @@ from litellm import (
     get_modified_max_tokens,
     token_counter,
 )
-from litellm.tests.large_text import text
-from litellm.tests.messages_with_counts import (
+from tests.local_testing.large_text import text
+from tests.local_testing.messages_with_counts import (
     MESSAGES_TEXT,
     MESSAGES_WITH_IMAGES,
     MESSAGES_WITH_TOOLS,