(feat) OpenAI set organization

This commit is contained in:
ishaan-jaff 2024-01-30 10:54:56 -08:00
parent 7fe8fff5d8
commit ae4e273db7

View file

@@ -221,6 +221,7 @@ class OpenAIChatCompletion(BaseLLM):
 headers: Optional[dict] = None,
 custom_prompt_dict: dict = {},
 client=None,
+organization: Optional[str] = None,
 ):
 super().completion()
 exception_mapping_worked = False
@@ -254,6 +255,7 @@ class OpenAIChatCompletion(BaseLLM):
 timeout=timeout,
 client=client,
 max_retries=max_retries,
+organization=organization,
 )
 else:
 return self.acompletion(
@@ -266,6 +268,7 @@ class OpenAIChatCompletion(BaseLLM):
 timeout=timeout,
 client=client,
 max_retries=max_retries,
+organization=organization,
 )
 elif optional_params.get("stream", False):
 return self.streaming(
@@ -278,6 +281,7 @@ class OpenAIChatCompletion(BaseLLM):
 timeout=timeout,
 client=client,
 max_retries=max_retries,
+organization=organization,
 )
 else:
 if not isinstance(max_retries, int):
@@ -291,6 +295,7 @@ class OpenAIChatCompletion(BaseLLM):
 http_client=litellm.client_session,
 timeout=timeout,
 max_retries=max_retries,
+organization=organization,
 )
 else:
 openai_client = client
@@ -358,6 +363,7 @@ class OpenAIChatCompletion(BaseLLM):
 timeout: float,
 api_key: Optional[str] = None,
 api_base: Optional[str] = None,
+organization: Optional[str] = None,
 client=None,
 max_retries=None,
 logging_obj=None,
@@ -372,6 +378,7 @@ class OpenAIChatCompletion(BaseLLM):
 http_client=litellm.aclient_session,
 timeout=timeout,
 max_retries=max_retries,
+organization=organization,
 )
 else:
 openai_aclient = client
@@ -412,6 +419,7 @@ class OpenAIChatCompletion(BaseLLM):
 model: str,
 api_key: Optional[str] = None,
 api_base: Optional[str] = None,
+organization: Optional[str] = None,
 client=None,
 max_retries=None,
 headers=None,
@@ -423,6 +431,7 @@ class OpenAIChatCompletion(BaseLLM):
 http_client=litellm.client_session,
 timeout=timeout,
 max_retries=max_retries,
+organization=organization,
 )
 else:
 openai_client = client
@@ -454,6 +463,7 @@ class OpenAIChatCompletion(BaseLLM):
 model: str,
 api_key: Optional[str] = None,
 api_base: Optional[str] = None,
+organization: Optional[str] = None,
 client=None,
 max_retries=None,
 headers=None,
@@ -467,6 +477,7 @@ class OpenAIChatCompletion(BaseLLM):
 http_client=litellm.aclient_session,
 timeout=timeout,
 max_retries=max_retries,
+organization=organization,
 )
 else:
 openai_aclient = client