mirror of https://github.com/BerriAI/litellm.git
Add litellm\tests\test_batch_completion_return_exceptions.py

commit a7ec1772b1
parent 64d229caaa

3 changed files with 33 additions and 1 deletion
.gitignore (vendored): 2 additions

@@ -52,3 +52,5 @@ litellm/proxy/_new_secret_config.yaml
 litellm/proxy/_new_secret_config.yaml
 litellm/proxy/_super_secret_config.yaml
 litellm/proxy/_super_secret_config.yaml
+.python-version
+litellm/llms/tokenizers/9b5ad71b2ce5302211f9c61530b329a4922fc6a4
litellm/main.py: 2 additions, 1 deletion

@@ -2143,7 +2143,7 @@ def completion(
     """
     assume input to custom LLM api bases follow this format:
     resp = requests.post(
         api_base,
         json={
             'model': 'meta-llama/Llama-2-13b-hf', # model name
             'params': {
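The hunk above cuts the docstring's example off at 'params': {. A self-contained sketch of the request shape it implies follows; the endpoint URL and every key under "params" are illustrative assumptions, not taken from the diff.

# Sketch of the custom api_base request shape implied by the docstring.
# The URL and the keys under "params" are hypothetical examples; the diff
# truncates before showing litellm's actual expected payload.
import requests

resp = requests.post(
    "https://my-custom-llm.example.com/generate",  # hypothetical api_base
    json={
        "model": "meta-llama/Llama-2-13b-hf",  # model name
        "params": {  # assumed contents from here on
            "prompt": ["what's the weather like?"],
            "max_tokens": 256,
        },
    },
)
print(resp.json())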
@@ -2280,6 +2280,7 @@ def batch_completion(
     deployment_id=None,
     request_timeout: Optional[int] = None,
     timeout: Optional[int] = 600,
+    return_exceptions: bool = False,
     # Optional liteLLM function params
     **kwargs,
 ):
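The diff adds only the keyword; below is a minimal sketch of the semantics it suggests, assuming batch_completion issues one completion call per message list. This is an illustration, not litellm's actual implementation, which may differ (e.g., by running the calls concurrently).

# Minimal sketch of the return_exceptions semantics, not litellm's actual
# implementation: run one completion per message list, and either re-raise
# the first error (default) or keep caught exceptions in the result list.
from typing import Any, Callable, List


def batch_completion_sketch(
    call_one: Callable[..., Any],  # stand-in for a single completion call
    batch_messages: List[list],
    return_exceptions: bool = False,
    **kwargs: Any,
) -> List[Any]:
    results: List[Any] = []
    for messages in batch_messages:
        try:
            results.append(call_one(messages=messages, **kwargs))
        except Exception as exc:
            if not return_exceptions:
                raise  # default: the whole batch fails on the first error
            results.append(exc)  # opt-in: the exception takes the slot instead
    return results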
litellm/tests/test_batch_completion_return_exceptions.py (new file): 29 additions
@@ -0,0 +1,29 @@
+"""Test batch_completion's return_exceptions."""
+import pytest
+import litellm
+
+msg1 = [{"role": "user", "content": "hi 1"}]
+msg2 = [{"role": "user", "content": "hi 2"}]
+
+
+def test_batch_completion_return_exceptions_default():
+    """Test batch_completion's return_exceptions."""
+    with pytest.raises(Exception):
+        _ = litellm.batch_completion(
+            model="gpt-3.5-turbo",
+            messages=[msg1, msg2],
+            api_key="sk_xxx",  # deliberately set invalid key
+            # return_exceptions=False,
+        )
+
+
+def test_batch_completion_return_exceptions_true():
+    """Test batch_completion's return_exceptions."""
+    res = litellm.batch_completion(
+        model="gpt-3.5-turbo",
+        messages=[msg1, msg2],
+        api_key="sk_xxx",  # deliberately set invalid key
+        return_exceptions=True,
+    )
+
+    assert isinstance(res[0], litellm.exceptions.AuthenticationError)
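With return_exceptions=True the returned list mixes responses and exception objects, apparently in request order (the test asserts res[0] is the error for the first message). A usage sketch under that assumption, which needs a valid API key for the successful calls:

# Illustrative consumer of a return_exceptions=True result: split
# successful responses from per-request errors by type.
import litellm

msgs = [[{"role": "user", "content": f"hi {i}"}] for i in range(2)]
results = litellm.batch_completion(
    model="gpt-3.5-turbo",
    messages=msgs,
    return_exceptions=True,
)
oks = [r for r in results if not isinstance(r, Exception)]
errs = [r for r in results if isinstance(r, Exception)]
print(f"{len(oks)} succeeded, {len(errs)} failed")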