LiteLLM Minor Fixes & Improvements (10/16/2024) (#6265)
* fix(caching_handler.py): handle positional arguments in add cache logic
  Fixes https://github.com/BerriAI/litellm/issues/6264
* feat(litellm_pre_call_utils.py): allow forwarding openai org id to backend client
  https://github.com/BerriAI/litellm/issues/6237
* docs(configs.md): add 'forward_openai_org_id' to docs
* fix(proxy_server.py): return model info if user_model is set
  Fixes https://github.com/BerriAI/litellm/issues/6233
* fix(hosted_vllm/chat/transformation.py): don't set tools unless non-none
* fix(openai.py): improve debug log for openai 'str' error
  Addresses https://github.com/BerriAI/litellm/issues/6272
* fix(proxy_server.py): fix linting error
* fix(proxy_server.py): fix linting errors
* test: skip WIP test
* docs(openai.md): add docs on passing openai org id from client to openai
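The org-id items above (forwarding the OpenAI org id from the client through the proxy to the backend OpenAI client) can be exercised from a standard OpenAI SDK client pointed at the proxy. A minimal sketch, not taken from the commit: the proxy URL, API key, and model alias are placeholders, and enabling `forward_openai_org_id` in the proxy config is assumed (only the setting name comes from the commit message; its exact placement is covered in configs.md).

```python
# Hypothetical client-side usage (not from the commit). The OpenAI SDK sends the
# organization id as the "OpenAI-Organization" header; per this change, a LiteLLM
# proxy with forward_openai_org_id enabled can pass it on to the backend client.
from openai import OpenAI

client = OpenAI(
    api_key="sk-anything",                # placeholder proxy key
    organization="org-my-openai-org-id",  # forwarded to the upstream OpenAI client
    base_url="http://localhost:4000",     # placeholder LiteLLM proxy address
)

response = client.chat.completions.create(
    model="gpt-4o",  # placeholder model alias configured on the proxy
    messages=[{"role": "user", "content": "hello"}],
)
print(response.choices[0].message.content)
```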
Parent: 43878bd2a0
Commit: 38a9a106d2
14 changed files with 371 additions and 47 deletions
@@ -590,6 +590,7 @@ class OpenAIChatCompletion(BaseLLM):
         - call chat.completions.create.with_raw_response when litellm.return_response_headers is True
         - call chat.completions.create by default
         """
+        raw_response = None
         try:
             raw_response = openai_client.chat.completions.with_raw_response.create(
                 **data, timeout=timeout
@@ -602,7 +603,14 @@ class OpenAIChatCompletion(BaseLLM):
             response = raw_response.parse()
             return headers, response
         except Exception as e:
-            raise e
+            if raw_response is not None:
+                raise Exception(
+                    "error - {}, Received response - {}, Type of response - {}".format(
+                        e, raw_response, type(raw_response)
+                    )
+                )
+            else:
+                raise e
 
     def completion(  # type: ignore
         self,
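The openai.py hunks above keep a handle to the raw response so that, when parsing fails (for example when the upstream body is a bare string rather than JSON), the raised error reports what was actually received and its type. Below is a minimal standalone sketch of the same pattern; `fetch_raw` and `parse_body` are hypothetical stand-ins, not LiteLLM or OpenAI SDK functions.

```python
# Standalone illustration of the pattern in the diff above: initialize raw_response
# before the try block, then include the raw payload and its type in the error if
# parsing blows up, instead of surfacing an opaque "'str' object ..." message.
def get_parsed_response(fetch_raw, parse_body):
    raw_response = None
    try:
        raw_response = fetch_raw()        # may itself raise
        return parse_body(raw_response)   # may raise if the body is e.g. a bare string
    except Exception as e:
        if raw_response is not None:
            raise Exception(
                "error - {}, Received response - {}, Type of response - {}".format(
                    e, raw_response, type(raw_response)
                )
            )
        else:
            raise e


# Example: the upstream returned a plain string instead of a JSON object.
try:
    get_parsed_response(lambda: "Internal Server Error", lambda body: body["choices"])
except Exception as err:
    print(err)
```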