diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json
index d1d1d602ca..d88b28ba1f 100644
--- a/litellm/model_prices_and_context_window_backup.json
+++ b/litellm/model_prices_and_context_window_backup.json
@@ -2002,7 +2002,7 @@
         "supports_tool_choice": true,
         "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/gemini-experimental"
     },
-    "gemini-flash-experimental": {
+    "gemini-pro-flash": {
         "max_tokens": 8192,
         "max_input_tokens": 1000000,
         "max_output_tokens": 8192,
diff --git a/litellm/tests/test_openai_batches_and_files.py b/litellm/tests/test_openai_batches_and_files.py
index d55d868b34..7f79da8e2b 100644
--- a/litellm/tests/test_openai_batches_and_files.py
+++ b/litellm/tests/test_openai_batches_and_files.py
@@ -114,7 +114,7 @@ async def test_async_create_batch(provider):
     )
     print("Response from creating file=", file_obj)
 
-    await asyncio.sleep(5)
+    await asyncio.sleep(10)
     batch_input_file_id = file_obj.id
     assert (
         batch_input_file_id is not None