Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
LiteLLM Minor Fixes & Improvements (10/16/2024) (#6265)
* fix(caching_handler.py): handle positional arguments in add cache logic
  Fixes https://github.com/BerriAI/litellm/issues/6264
* feat(litellm_pre_call_utils.py): allow forwarding openai org id to backend client
  https://github.com/BerriAI/litellm/issues/6237
* docs(configs.md): add 'forward_openai_org_id' to docs
* fix(proxy_server.py): return model info if user_model is set
  Fixes https://github.com/BerriAI/litellm/issues/6233
* fix(hosted_vllm/chat/transformation.py): don't set tools unless non-none
* fix(openai.py): improve debug log for openai 'str' error
  Addresses https://github.com/BerriAI/litellm/issues/6272
* fix(proxy_server.py): fix linting error
* fix(proxy_server.py): fix linting errors
* test: skip WIP test
* docs(openai.md): add docs on passing openai org id from client to openai
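For context on the org-id forwarding change above, a minimal client-side sketch, assuming a LiteLLM proxy running at http://localhost:4000 with the new forward_openai_org_id setting enabled in its config (the base_url, api_key, and model below are placeholders, not values from this commit): the `organization` set on the OpenAI client is what the proxy forwards to the backend OpenAI client.

    # Sketch: passing an OpenAI org id through a LiteLLM proxy.
    # Assumes a proxy at http://localhost:4000 with forward_openai_org_id
    # enabled; base_url, api_key, and model are placeholders.
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:4000",  # LiteLLM proxy endpoint (assumed)
        api_key="sk-1234",                 # proxy virtual key (placeholder)
        organization="org-my-openai-org",  # forwarded to the upstream OpenAI client
    )

    response = client.chat.completions.create(
        model="gpt-4o",  # whichever model the proxy config exposes
        messages=[{"role": "user", "content": "hello"}],
    )
    print(response.choices[0].message.content)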
parent 43878bd2a0
commit 38a9a106d2
14 changed files with 371 additions and 47 deletions
@@ -927,7 +927,7 @@ def client(original_function):
             )

             # [OPTIONAL] ADD TO CACHE
-            _llm_caching_handler._sync_set_cache(
+            _llm_caching_handler.sync_set_cache(
                 result=result,
                 args=args,
                 kwargs=kwargs,
@@ -1126,7 +1126,7 @@ def client(original_function):
             )

             ## Add response to cache
-            await _llm_caching_handler._async_set_cache(
+            await _llm_caching_handler.async_set_cache(
                 result=result,
                 original_function=original_function,
                 kwargs=kwargs,
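The renamed calls above sit in the positional-argument fix called out in the commit message. A self-contained sketch of that bug class (illustrative only, not LiteLLM's actual caching code): a wrapper that builds its cache key from kwargs alone collapses all purely positional calls onto the same empty key, so the positional args have to be folded into the set/get path as well.

    # Sketch of the bug class the commit fixes (illustrative, not
    # LiteLLM's actual code): a caching wrapper keyed only on kwargs
    # mishandles calls made with positional arguments.
    import functools
    import json

    _cache: dict[str, object] = {}

    def cached(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            # Buggy variant: key = json.dumps(kwargs, sort_keys=True)
            # maps every purely positional call to the key "{}".
            # Fixed variant folds positional args into the key too:
            key = json.dumps(
                {"args": args, "kwargs": kwargs}, sort_keys=True, default=str
            )
            if key in _cache:
                return _cache[key]
            result = fn(*args, **kwargs)
            _cache[key] = result
            return result
        return wrapper

    @cached
    def completion(model, messages):
        return f"computed response for {model}"

    completion("gpt-3.5-turbo", [{"role": "user", "content": "hi"}])  # computed
    completion("gpt-3.5-turbo", [{"role": "user", "content": "hi"}])  # cache hit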