Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-26 19:24:27 +00:00
(feat) add Vertex Batches API support in OpenAI format (#7032)
* working request
* working transform
* working request
* transform vertex batch response
* add _async_create_batch
* move gcs functions to base
* fix _get_content_from_openai_file
* transform_openai_file_content_to_vertex_ai_file_content
* fix transform vertex gcs bucket upload to OAI files format
* working e2e test
* _get_gcs_object_name
* fix linting
* add doc string
* fix transform_gcs_bucket_response_to_openai_file_object
* use vertex for batch endpoints
* add batches support for vertex
* test_vertex_batches_endpoint
* test_vertex_batch_prediction
* fix gcs bucket base auth
* docs clean up batches
* docs Batch API
* docs vertex batches api
* test_get_gcs_logging_config_without_service_account
* undo change
* fix vertex md
* test_get_gcs_logging_config_without_service_account
* ci/cd run again
This commit is contained in:
parent dd5ccdd889
commit 0eef9df396
20 changed files with 1347 additions and 424 deletions
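For context on what this PR enables: Vertex AI can now be targeted through litellm's OpenAI-format Files and Batches calls by setting custom_llm_provider="vertex_ai". Below is a minimal sketch of that flow; the JSONL file path and its contents are placeholders, and the parameter set is assumed to mirror the OpenAI Batches API (litellm.acreate_file and litellm.acreate_batch are the calls referenced in this diff).

import asyncio

import litellm


async def main() -> None:
    # Upload the JSONL batch input file using OpenAI Files parameters; with
    # custom_llm_provider="vertex_ai" the file is routed to the Vertex/GCS path.
    file_obj = await litellm.acreate_file(
        file=open("vertex_batch_requests.jsonl", "rb"),  # placeholder path
        purpose="batch",
        custom_llm_provider="vertex_ai",
    )

    # Create the batch job against Vertex AI using OpenAI Batches parameters.
    batch = await litellm.acreate_batch(
        completion_window="24h",
        endpoint="/v1/chat/completions",
        input_file_id=file_obj.id,
        custom_llm_provider="vertex_ai",
    )
    print(batch)


asyncio.run(main())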
@@ -59,6 +59,8 @@ def get_files_provider_config(
     custom_llm_provider: str,
 ):
     global files_config
+    if custom_llm_provider == "vertex_ai":
+        return None
     if files_config is None:
         raise ValueError("files_config is not set, set it on your config.yaml file.")
     for setting in files_config:
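In the hunk above, get_files_provider_config looks up per-provider settings from the files_config block set in the proxy's config.yaml. The new early return means custom_llm_provider == "vertex_ai" no longer requires a files_config entry, since (per the commit message) Vertex file handling goes through the GCS-backed upload path instead.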
@@ -212,9 +214,9 @@ async def create_file(
         if llm_provider_config is not None:
             # add llm_provider_config to data
             _create_file_request.update(llm_provider_config)
-
+        _create_file_request.pop("custom_llm_provider", None)  # type: ignore
         # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
-        response = await litellm.acreate_file(**_create_file_request)  # type: ignore
+        response = await litellm.acreate_file(**_create_file_request, custom_llm_provider=custom_llm_provider)  # type: ignore
 
         ### ALERTING ###
         asyncio.create_task(
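One detail worth noting in the hunk above: custom_llm_provider is popped from _create_file_request before the dict is unpacked, because passing it both inside **_create_file_request and as an explicit keyword would raise a TypeError. A standalone illustration (not litellm code, names are made up):

def call(**kwargs):
    return kwargs


request = {"purpose": "batch", "custom_llm_provider": "vertex_ai"}

# call(**request, custom_llm_provider="vertex_ai") at this point would raise:
#   TypeError: call() got multiple values for keyword argument 'custom_llm_provider'
request.pop("custom_llm_provider", None)
print(call(**request, custom_llm_provider="vertex_ai"))  # now safe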
@@ -239,7 +241,6 @@ async def create_file(
                 model_region=getattr(user_api_key_dict, "allowed_model_region", ""),
             )
         )
 
         return response
     except Exception as e:
         await proxy_logging_obj.post_call_failure_hook(