diff --git a/litellm/tests/test_configs/test_config_no_auth.yaml b/litellm/tests/test_configs/test_config_no_auth.yaml
index 9d7aff570..9cc32bb0b 100644
--- a/litellm/tests/test_configs/test_config_no_auth.yaml
+++ b/litellm/tests/test_configs/test_config_no_auth.yaml
@@ -80,16 +80,6 @@ model_list:
     description: this is a test openai model
     id: 9b1ef341-322c-410a-8992-903987fef439
   model_name: test_openai_models
-- litellm_params:
-    model: bedrock/amazon.titan-embed-text-v1
-  model_info:
-    mode: embedding
-  model_name: amazon-embeddings
-- litellm_params:
-    model: sagemaker/berri-benchmarking-gpt-j-6b-fp16
-  model_info:
-    mode: embedding
-  model_name: GPT-J 6B - Sagemaker Text Embedding (Internal)
 - litellm_params:
     model: dall-e-3
   model_info:
diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py
index caf32299f..3db4a980a 100644
--- a/litellm/tests/test_proxy_server.py
+++ b/litellm/tests/test_proxy_server.py
@@ -146,6 +146,7 @@ def test_bedrock_embedding(client_no_auth):
         pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")
 
 
+@pytest.mark.skip(reason="AWS Suspended Account")
 def test_sagemaker_embedding(client_no_auth):
     global headers
     from litellm.proxy.proxy_server import user_custom_auth