mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
(feat) /batches
- track user_api_key_alias
, user_api_key_team_alias
etc for /batch requests (#7401)
* run azure testing on ci/cd * update docs on azure batches endpoints * add input azure.jsonl * refactor - use separate file for batches endpoints * fixes for passing custom llm provider to /batch endpoints * pass custom llm provider to files endpoints * update azure batches doc * add info for azure batches api * update batches endpoints * use simple helper for raising proxy exception * update config.yml * fix imports * add type hints to get_litellm_params * update get_litellm_params * update get_litellm_params * update get slp * QOL - stop double logging a create batch operations on custom loggers * re use slp from og event * _create_standard_logging_object_for_completed_batch * fix linting errors * reduce num changes in PR * update BATCH_STATUS_POLL_MAX_ATTEMPTS
This commit is contained in:
parent
47e12802df
commit
08a4c72692
9 changed files with 72 additions and 29 deletions
|
@@ -19,13 +19,14 @@ from typing import Any, Coroutine, Dict, Literal, Optional, Union
|
|||
import httpx
|
||||
|
||||
import litellm
|
||||
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
|
||||
from litellm.llms.azure.azure import AzureBatchesAPI
|
||||
from litellm.llms.openai.openai import OpenAIBatchesAPI
|
||||
from litellm.llms.vertex_ai.batches.handler import VertexAIBatchPrediction
|
||||
from litellm.secret_managers.main import get_secret_str
|
||||
from litellm.types.llms.openai import Batch, CreateBatchRequest, RetrieveBatchRequest
|
||||
from litellm.types.router import GenericLiteLLMParams
|
||||
from litellm.utils import client, supports_httpx_timeout
|
||||
from litellm.utils import client, get_litellm_params, supports_httpx_timeout
|
||||
|
||||
from .batch_utils import batches_async_logging
|
||||
|
||||
|
@@ -114,9 +115,22 @@ def create_batch(
|
|||
try:
|
||||
optional_params = GenericLiteLLMParams(**kwargs)
|
||||
_is_async = kwargs.pop("acreate_batch", False) is True
|
||||
litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj", None)
|
||||
### TIMEOUT LOGIC ###
|
||||
timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
|
||||
# set timeout for 10 minutes by default
|
||||
litellm_params = get_litellm_params(
|
||||
custom_llm_provider=custom_llm_provider,
|
||||
litellm_call_id=kwargs.get("litellm_call_id", None),
|
||||
litellm_trace_id=kwargs.get("litellm_trace_id"),
|
||||
litellm_metadata=kwargs.get("litellm_metadata"),
|
||||
)
|
||||
litellm_logging_obj.update_environment_variables(
|
||||
model=None,
|
||||
user=None,
|
||||
optional_params=optional_params.model_dump(),
|
||||
litellm_params=litellm_params,
|
||||
custom_llm_provider=custom_llm_provider,
|
||||
)
|
||||
|
||||
if (
|
||||
timeout is not None
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue