(feat+test) use passed OpenAI client
parent 8ac7801283
commit f4a7760ea1

2 changed files with 59 additions and 17 deletions
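In short: every OpenAI code path in OpenAIChatCompletion (sync completion, async completion, streaming, async streaming, and embedding) gains an optional client parameter and only constructs its own OpenAI/AsyncOpenAI instance when none is passed in. A minimal caller-side sketch of what this enables, modeled on the test added at the bottom of this diff (the messages payload here is an assumption; the real test reuses its module-level messages):

import os
import openai
import litellm

# One client built up front; litellm reuses it (and its HTTP connection pool)
# instead of constructing a fresh OpenAI client on every call.
client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])

for _ in range(2):
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],  # assumed sample payload
        client=client,
    )
    print(response)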
@@ -174,7 +174,9 @@ class OpenAIChatCompletion(BaseLLM):
                    litellm_params=None,
                    logger_fn=None,
                    headers: Optional[dict]=None,
-                   custom_prompt_dict: dict={}):
+                   custom_prompt_dict: dict={},
+                   client=None
+                   ):
         super().completion()
         exception_mapping_worked = False
         try:
@@ -203,16 +205,19 @@ class OpenAIChatCompletion(BaseLLM):
         try:
             if acompletion is True:
                 if optional_params.get("stream", False):
-                    return self.async_streaming(logging_obj=logging_obj, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout)
+                    return self.async_streaming(logging_obj=logging_obj, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout, client=client)
                 else:
-                    return self.acompletion(data=data, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout)
+                    return self.acompletion(data=data, model_response=model_response, api_base=api_base, api_key=api_key, timeout=timeout, client=client)
             elif optional_params.get("stream", False):
-                return self.streaming(logging_obj=logging_obj, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout)
+                return self.streaming(logging_obj=logging_obj, data=data, model=model, api_base=api_base, api_key=api_key, timeout=timeout, client=client)
             else:
                 max_retries = data.pop("max_retries", 2)
                 if not isinstance(max_retries, int):
                     raise OpenAIError(status_code=422, message="max retries must be an int")
-                openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=max_retries)
+                if client is None:
+                    openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=max_retries)
+                else:
+                    openai_client = client
                 response = openai_client.chat.completions.create(**data) # type: ignore
                 logging_obj.post_call(
                     input=None,
@@ -251,10 +256,15 @@ class OpenAIChatCompletion(BaseLLM):
                           model_response: ModelResponse,
                           timeout: float,
                           api_key: Optional[str]=None,
-                          api_base: Optional[str]=None):
+                          api_base: Optional[str]=None,
+                          client=None
+                          ):
         response = None
         try:
-            openai_aclient = AsyncOpenAI(api_key=api_key, base_url=api_base, http_client=litellm.aclient_session, timeout=timeout, max_retries=data.pop("max_retries", 2))
+            if client is None:
+                openai_aclient = AsyncOpenAI(api_key=api_key, base_url=api_base, http_client=litellm.aclient_session, timeout=timeout, max_retries=data.pop("max_retries", 2))
+            else:
+                openai_aclient = client
             response = await openai_aclient.chat.completions.create(**data)
             return convert_to_model_response_object(response_object=json.loads(response.model_dump_json()), model_response_object=model_response)
         except Exception as e:
@@ -272,9 +282,13 @@ class OpenAIChatCompletion(BaseLLM):
                   data: dict,
                   model: str,
                   api_key: Optional[str]=None,
-                  api_base: Optional[str]=None
+                  api_base: Optional[str]=None,
+                  client = None,
         ):
-        openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=data.pop("max_retries", 2))
+        if client is None:
+            openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=data.pop("max_retries", 2))
+        else:
+            openai_client = client
         response = openai_client.chat.completions.create(**data)
         streamwrapper = CustomStreamWrapper(completion_stream=response, model=model, custom_llm_provider="openai",logging_obj=logging_obj)
         for transformed_chunk in streamwrapper:
@@ -286,10 +300,14 @@ class OpenAIChatCompletion(BaseLLM):
                           data: dict,
                           model: str,
                           api_key: Optional[str]=None,
-                          api_base: Optional[str]=None):
+                          api_base: Optional[str]=None,
+                          client=None):
         response = None
         try:
-            openai_aclient = AsyncOpenAI(api_key=api_key, base_url=api_base, http_client=litellm.aclient_session, timeout=timeout, max_retries=data.pop("max_retries", 2))
+            if client is None:
+                openai_aclient = AsyncOpenAI(api_key=api_key, base_url=api_base, http_client=litellm.aclient_session, timeout=timeout, max_retries=data.pop("max_retries", 2))
+            else:
+                openai_aclient = client
             response = await openai_aclient.chat.completions.create(**data)
             streamwrapper = CustomStreamWrapper(completion_stream=response, model=model, custom_llm_provider="openai",logging_obj=logging_obj)
             async for transformed_chunk in streamwrapper:
@@ -312,6 +330,7 @@ class OpenAIChatCompletion(BaseLLM):
                   model_response: Optional[litellm.utils.EmbeddingResponse] = None,
                   logging_obj=None,
                   optional_params=None,
+                  client=None,
         ):
         super().embedding()
         exception_mapping_worked = False
@@ -325,8 +344,10 @@ class OpenAIChatCompletion(BaseLLM):
             max_retries = data.pop("max_retries", 2)
             if not isinstance(max_retries, int):
                 raise OpenAIError(status_code=422, message="max retries must be an int")
-            openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, max_retries=max_retries, timeout=timeout)
+            if client is None:
+                openai_client = OpenAI(api_key=api_key, base_url=api_base, http_client=litellm.client_session, timeout=timeout, max_retries=max_retries)
+            else:
+                openai_client = client
             ## LOGGING
             logging_obj.pre_call(
                 input=input,
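All five call sites above repeat the same fallback shape. Distilled into one place (a sketch of the pattern, not literal code from the diff; max_retries and the session come from the surrounding method scope):

# Honor an injected client; otherwise build one from the call's parameters.
if client is None:
    openai_client = OpenAI(
        api_key=api_key,
        base_url=api_base,
        http_client=litellm.client_session,
        timeout=timeout,
        max_retries=max_retries,
    )
else:
    openai_client = client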
@@ -606,10 +606,31 @@ async def test_re_use_azure_async_client():
     except Exception as e:
         pytest.fail("got Exception", e)

-import asyncio
-asyncio.run(
-    test_re_use_azure_async_client()
-)
+# import asyncio
+# asyncio.run(
+#     test_re_use_azure_async_client()
+# )
+
+
+def test_re_use_openaiClient():
+    try:
+        print("gpt-3.5 with client test\n\n")
+        litellm.set_verbose=True
+        import openai
+        client = openai.OpenAI(
+            api_key=os.environ["OPENAI_API_KEY"],
+        )
+        ## Test OpenAI call
+        for _ in range(2):
+            response = litellm.completion(
+                model="gpt-3.5-turbo",
+                messages=messages,
+                client=client
+            )
+            print(f"response: {response}")
+    except Exception as e:
+        pytest.fail("got Exception", e)
+test_re_use_openaiClient()

 def test_completion_azure():
     try:
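The new test exercises only the synchronous path; the async and streaming helpers accept client the same way. A hypothetical async equivalent, not part of this commit, assuming litellm.acompletion forwards client just as the sync entry point does:

import os
import asyncio
import openai
import litellm

async def main():
    # AsyncOpenAI client reused across calls, mirroring the sync test above.
    client = openai.AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
    for _ in range(2):
        response = await litellm.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hey, how's it going?"}],  # assumed payload
            client=client,
        )
        print(response)

asyncio.run(main())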