Revert "Add return_exceptions to litellm.batch_completion"

This commit is contained in:
Ishaan Jaff 2024-05-04 13:01:17 -07:00 committed by GitHub
parent d968dedd77
commit df8e33739d
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 2 additions and 45 deletions

2
.gitignore vendored
View file

@@ -52,5 +52,3 @@ litellm/proxy/_new_secret_config.yaml
litellm/proxy/_new_secret_config.yaml
litellm/proxy/_super_secret_config.yaml
litellm/proxy/_super_secret_config.yaml
.python-version
litellm/llms/tokenizers/9b5ad71b2ce5302211f9c61530b329a4922fc6a4

View file

@@ -2165,7 +2165,7 @@ def completion(
"""
assume input to custom LLM api bases follow this format:
resp = requests.post(
api_base,
api_base,
json={
'model': 'meta-llama/Llama-2-13b-hf', # model name
'params': {
@@ -2302,7 +2302,6 @@ def batch_completion(
deployment_id=None,
request_timeout: Optional[int] = None,
timeout: Optional[int] = 600,
return_exceptions: bool = False,
# Optional liteLLM function params
**kwargs,
):
@@ -2326,7 +2325,6 @@ def batch_completion(
user (str, optional): The user string for generating completions. Defaults to "".
deployment_id (optional): The deployment ID for generating completions. Defaults to None.
request_timeout (int, optional): The request timeout for generating completions. Defaults to None.
return_exceptions (bool): Whether to return exceptions and partial results when exceptions occur. Defaults to False.
Returns:
list: A list of completion results.
@@ -2385,17 +2383,7 @@ def batch_completion(
completions.append(future)
# Retrieve the results from the futures
# results = [future.result() for future in completions]
if return_exceptions:
results = []
for future in completions:
try:
results.append(future.result())
except Exception as exc:
results.append(exc)
else:
results = [future.result() for future in completions]
results = [future.result() for future in completions]
return results

View file

@@ -1,29 +0,0 @@
"""Test batch_completion's return_exceptions."""
import pytest
import litellm
msg1 = [{"role": "user", "content": "hi 1"}]
msg2 = [{"role": "user", "content": "hi 2"}]
def test_batch_completion_return_exceptions_default():
"""Test batch_completion's return_exceptions."""
with pytest.raises(Exception):
_ = litellm.batch_completion(
model="gpt-3.5-turbo",
messages=[msg1, msg2],
api_key="sk_xxx", # deliberately set invalid key
# return_exceptions=False,
)
def test_batch_completion_return_exceptions_true():
"""Test batch_completion's return_exceptions."""
res = litellm.batch_completion(
model="gpt-3.5-turbo",
messages=[msg1, msg2],
api_key="sk_xxx", # deliberately set invalid key
return_exceptions=True,
)
assert isinstance(res[0], litellm.exceptions.AuthenticationError)