mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
Litellm dev 02 10 2025 p2 (#8443)
* Fixed issue #8246 (#8250) * Fixed issue #8246 * Added unit tests for discard() and for remove_callback_from_list_by_object() * fix(openai.py): support dynamic passing of organization param to openai handles scenario where client-side org id is passed to openai --------- Co-authored-by: Erez Hadad <erezh@il.ibm.com>
This commit is contained in:
parent
47f46f92c8
commit
e26d7df91b
9 changed files with 112 additions and 5 deletions
|
@ -27,6 +27,7 @@ from typing_extensions import overload
|
|||
import litellm
|
||||
from litellm import LlmProviders
|
||||
from litellm._logging import verbose_logger
|
||||
from litellm.constants import DEFAULT_MAX_RETRIES
|
||||
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
|
||||
from litellm.litellm_core_utils.logging_utils import track_llm_api_timing
|
||||
from litellm.llms.base_llm.base_model_iterator import BaseModelResponseIterator
|
||||
|
@ -320,6 +321,17 @@ class OpenAIChatCompletion(BaseLLM):
|
|||
def __init__(self) -> None:
    """Construct the OpenAI chat-completion handler.

    All shared state/setup lives in ``BaseLLM``; this subclass adds no
    instance attributes of its own.
    """
    super().__init__()
def _set_dynamic_params_on_client(
|
||||
self,
|
||||
client: Union[OpenAI, AsyncOpenAI],
|
||||
organization: Optional[str] = None,
|
||||
max_retries: Optional[int] = None,
|
||||
):
|
||||
if organization is not None:
|
||||
client.organization = organization
|
||||
if max_retries is not None:
|
||||
client.max_retries = max_retries
|
||||
|
||||
def _get_openai_client(
|
||||
self,
|
||||
is_async: bool,
|
||||
|
@ -327,11 +339,10 @@ class OpenAIChatCompletion(BaseLLM):
|
|||
api_base: Optional[str] = None,
|
||||
api_version: Optional[str] = None,
|
||||
timeout: Union[float, httpx.Timeout] = httpx.Timeout(None),
|
||||
max_retries: Optional[int] = 2,
|
||||
max_retries: Optional[int] = DEFAULT_MAX_RETRIES,
|
||||
organization: Optional[str] = None,
|
||||
client: Optional[Union[OpenAI, AsyncOpenAI]] = None,
|
||||
):
|
||||
args = locals()
|
||||
if client is None:
|
||||
if not isinstance(max_retries, int):
|
||||
raise OpenAIError(
|
||||
|
@ -364,7 +375,6 @@ class OpenAIChatCompletion(BaseLLM):
|
|||
organization=organization,
|
||||
)
|
||||
else:
|
||||
|
||||
_new_client = OpenAI(
|
||||
api_key=api_key,
|
||||
base_url=api_base,
|
||||
|
@ -383,6 +393,11 @@ class OpenAIChatCompletion(BaseLLM):
|
|||
return _new_client
|
||||
|
||||
else:
|
||||
self._set_dynamic_params_on_client(
|
||||
client=client,
|
||||
organization=organization,
|
||||
max_retries=max_retries,
|
||||
)
|
||||
return client
|
||||
|
||||
@track_llm_api_timing()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue