Delete metadata fields that break Azure OpenAI batch creation

If `model_info` or `caching_groups` is present in the batch request's `metadata`, calls to `/v1/batches` fail with:

```pytb
Traceback (most recent call last):
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/proxy/batches_endpoints/endpoints.py", line 120, in create_batch
    response = await llm_router.acreate_batch(**_create_batch_data)  # type: ignore
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 2754, in acreate_batch
    raise e
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 2742, in acreate_batch
    response = await self.async_function_with_fallbacks(**kwargs)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 3248, in async_function_with_fallbacks
    raise original_exception
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 3062, in async_function_with_fallbacks
    response = await self.async_function_with_retries(*args, **kwargs)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 3438, in async_function_with_retries
    raise original_exception
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 3331, in async_function_with_retries
    response = await self.make_call(original_function, *args, **kwargs)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 3447, in make_call
    response = await response
               ^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 2842, in _acreate_batch
    raise e
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/router.py", line 2829, in _acreate_batch
    response = await response  # type: ignore
               ^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/utils.py", line 1441, in wrapper_async
    raise e
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/utils.py", line 1300, in wrapper_async
    result = await original_function(*args, **kwargs)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/batches/main.py", line 89, in acreate_batch
    raise e
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/batches/main.py", line 83, in acreate_batch
    response = await init_response
               ^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/litellm/llms/azure/batches/handler.py", line 39, in acreate_batch
    response = await azure_client.batches.create(**create_batch_data)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/venv/lib/python3.11/site-packages/openai/resources/batches.py", line 309, in create
    return await self._post(
           ^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/venv/lib/python3.11/site-packages/openai/_base_client.py", line 1768, in post
    return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/venv/lib/python3.11/site-packages/openai/_base_client.py", line 1461, in request
    return await self._request(
           ^^^^^^^^^^^^^^^^^^^^
  File "/Users/abramowi/Code/OpenSource/litellm/venv/lib/python3.11/site-packages/openai/_base_client.py", line 1563, in _request
    raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'code': 'UserError', 'severity': None, 'message': 'Error when parsing request; unable to deserialize request body', 'messageFormat': None, 'messageParameters': None, 'referenceCode': None, 'detailsUri': None, 'target': None, 'details': [], 'innerError': None, 'debugInfo': None, 'additionalInfo': None}, 'correlation': {'operation': '15b57b50944a622dcbe6ef6b1fe3b6da', 'request': '45f83bae4c62fc1e'}, 'environment': 'westus', 'location': 'westus', 'time': '2025-03-13T00:50:02.6689421+00:00', 'componentName': 'managed-batch-inference', 'statusCode': 400}
INFO:     127.0.0.1:57972 - "POST /v1/batches HTTP/1.1" 500 Internal Server Error
```
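
The 400 is Azure failing to deserialize the request body: the batch API expects `metadata` to be flat string-to-string pairs, while LiteLLM's router carries nested/non-string values (`model_info` is a dict, `caching_groups` can be `None`) in that field. A minimal reproduction sketch of what presumably happens (endpoint, key, file ID, and metadata values below are all hypothetical placeholders):

```python
# Hypothetical reproduction: sending non-string values in batch metadata
# to Azure OpenAI, which presumably triggers the 400 UserError above.
import asyncio

from openai import AsyncAzureOpenAI


async def main() -> None:
    client = AsyncAzureOpenAI(
        api_key="sk-placeholder",                           # placeholder
        api_version="2024-10-21",                           # assumed API version
        azure_endpoint="https://example.openai.azure.com",  # placeholder
    )
    await client.batches.create(
        input_file_id="file-abc123",  # placeholder for an uploaded JSONL file
        endpoint="/v1/chat/completions",
        completion_window="24h",
        # The batch API accepts only str -> str metadata; values like these
        # (as passed through by LiteLLM's router) make the body unparseable:
        metadata={
            "model_info": {"id": "azure/gpt-4o"},  # hypothetical dict value
            "caching_groups": None,
        },
    )


asyncio.run(main())
```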

Possibly related to https://github.com/BerriAI/litellm/issues/5396
commit cfe0a43959 (parent 051f346fa6)
Marc Abramowitz, 2025-03-12 18:52:26 -07:00

```diff
@@ -35,6 +35,13 @@ class AzureBatchesAPI(BaseAzureLLM):
         create_batch_data: CreateBatchRequest,
         azure_client: AsyncAzureOpenAI,
     ) -> LiteLLMBatch:
+        metadata = create_batch_data.get("metadata", {})
+        if metadata:
+            if "model_info" in metadata:
+                del metadata["model_info"]
+            if "caching_groups" in metadata:
+                del metadata["caching_groups"]
         response = await azure_client.batches.create(**create_batch_data)
         return LiteLLMBatch(**response.model_dump())
```
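
The committed fix mutates `create_batch_data["metadata"]` in place and special-cases the two known offending keys. A more general alternative (a sketch only, not what this commit does) would copy the metadata and drop every non-string value, since the batch API accepts only string-to-string pairs:

```python
from typing import Any, Dict, Optional


def sanitize_batch_metadata(
    metadata: Optional[Dict[str, Any]],
) -> Optional[Dict[str, str]]:
    """Return a copy of metadata keeping only string values.

    Hypothetical helper, not part of this commit: the batch API accepts
    only str -> str metadata, so non-string values are dropped up front.
    """
    if metadata is None:
        return None
    return {k: v for k, v in metadata.items() if isinstance(v, str)}
```

A copy-based approach like this also avoids surprising the caller: `del` on the shared dict removes the keys from the caller's `create_batch_data` as well, whereas the filtered copy leaves the original request untouched.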