# litellm-mirror/tests/local_testing/test_bad_params.py

#### What this tests ####
# This tests chaos-monkey scenarios - if random parts of the system are broken
# or things aren't sent correctly, what happens?
# Expect to add more edge cases to this over time.

import os
import sys
import traceback

import pytest

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import litellm
from litellm import completion, embedding
from litellm.utils import Message

# litellm.set_verbose = True
user_message = "Hello, how are you?"
messages = [{"content": user_message, "role": "user"}]
model_val = None


def test_completion_with_no_model():
    # test with no model passed at all
    with pytest.raises(TypeError):
        response = completion(messages=messages)


def test_completion_with_empty_model():
    # test on empty
    try:
        response = completion(model=model_val, messages=messages)
    except Exception as e:
        print(f"error occurred: {e}")
        pass


def test_completion_invalid_param_cohere():
    try:
        litellm.set_verbose = True
        response = completion(model="command-nightly", messages=messages, seed=12)
        pytest.fail("This should have failed - cohere does not support the `seed` parameter")
    except Exception as e:
        assert isinstance(e, litellm.UnsupportedParamsError)
        print("got an exception=", str(e))
        if " cohere does not support parameters: {'seed': 12}" in str(e):
            pass
        else:
            pytest.fail(f"An unexpected error occurred: {e}")


def test_completion_function_call_cohere():
    try:
        response = completion(
            model="command-nightly", messages=messages, functions=["TEST-FUNCTION"]
        )
        pytest.fail("This should have failed - invalid `functions` value for cohere")
    except Exception as e:
        print(e)
        pass


def test_completion_function_call_openai():
    try:
        messages = [{"role": "user", "content": "What is the weather like in Boston?"}]
        response = completion(
            model="gpt-3.5-turbo",
            messages=messages,
            functions=[
                {
                    "name": "get_current_weather",
                    "description": "Get the current weather in a given location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {
                                "type": "string",
                                "description": "The city and state, e.g. San Francisco, CA",
                            },
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                            },
                        },
                        "required": ["location"],
                    },
                }
            ],
        )
        print(f"response: {response}")
    except Exception:
        pass


# test_completion_function_call_openai()
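

# A hedged variant of the test above using the newer `tools` / `tool_choice` shape
# of the OpenAI function-calling API, which litellm also accepts on `completion`.
# Sketch only: it assumes OPENAI_API_KEY is set and, like the test above, swallows
# any exception rather than asserting on the returned tool call.
def test_completion_tool_call_openai():
    try:
        messages = [{"role": "user", "content": "What is the weather like in Boston?"}]
        response = completion(
            model="gpt-3.5-turbo",
            messages=messages,
            tools=[
                {
                    "type": "function",
                    "function": {
                        "name": "get_current_weather",
                        "description": "Get the current weather in a given location",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "location": {
                                    "type": "string",
                                    "description": "The city and state, e.g. San Francisco, CA",
                                },
                                "unit": {
                                    "type": "string",
                                    "enum": ["celsius", "fahrenheit"],
                                },
                            },
                            "required": ["location"],
                        },
                    },
                }
            ],
            tool_choice="auto",
        )
        print(f"response: {response}")
    except Exception:
        pass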


def test_completion_with_no_provider():
    # test with no provider set up for the model
    try:
        model = "cerebras/btlm-3b-8k-base"
        response = completion(model=model, messages=messages)
    except Exception as e:
        print(f"error occurred: {e}")
        pass
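

# A hedged, assertion-based companion to the test above. The model string here
# ("some-nonexistent-model" is a hypothetical name, not a real model) has no
# provider prefix, so litellm shouldn't be able to infer a provider and is
# expected to raise one of its mapped exceptions (BadRequestError in the
# releases I've seen - treat the exact class as an assumption; only "some
# exception" is asserted here).
def test_completion_with_unknown_model_raises():
    with pytest.raises(Exception) as excinfo:
        completion(model="some-nonexistent-model", messages=messages)
    print(f"raised: {excinfo.value}")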