Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 19:24:27 +00:00)
(feat) Add cost tracking for /batches requests OpenAI (#7384)
* add basic logging for `create_batch`
* add create_batch as a call type
* add basic dd logging for batches
* basic batch creation logging on DD
* batch endpoints add cost calc
* fix batches_async_logging
* separate folder for batches testing
* new job for batches tests
* test batches logging
* fix validation logic
* add vertex_batch_completions.jsonl
* test test_async_create_batch
* test_async_create_batch
* update tests
* test_completion_with_no_model
* remove dead code
* update load_vertex_ai_credentials
* test_avertex_batch_prediction
* update get async httpx client
* fix get_async_httpx_client
* update test_avertex_batch_prediction
* fix batches testing config.yaml
* add google deps
* fix vertex files handler
Parent: 9d66976162
Commit: 00544b97c8
13 changed files with 649 additions and 78 deletions
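
For context on what the new cost tracking applies to, below is a minimal sketch of the /batches flow through litellm's OpenAI-compatible batch helpers (litellm.acreate_file / litellm.acreate_batch). The input file name and request payload are made-up placeholders, and the exact helper surface can vary by litellm version; this illustrates the kind of call the commit adds logging and cost calculation for, and is not code from the PR itself.

# Sketch: create an OpenAI batch via litellm. Assumes OPENAI_API_KEY is set
# and that litellm's batch helpers (acreate_file / acreate_batch) are available.
import asyncio
import json

import litellm


async def main():
    # Hypothetical one-request batch input file in the OpenAI batch .jsonl format.
    request = {
        "custom_id": "request-1",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Hello"}],
        },
    }
    with open("openai_batch_completions.jsonl", "w") as f:
        f.write(json.dumps(request) + "\n")

    # Upload the input file, then create the batch. Per this commit, these
    # /batches requests are what get logged and cost-tracked.
    file_obj = await litellm.acreate_file(
        file=open("openai_batch_completions.jsonl", "rb"),
        purpose="batch",
        custom_llm_provider="openai",
    )
    batch = await litellm.acreate_batch(
        completion_window="24h",
        endpoint="/v1/chat/completions",
        input_file_id=file_obj.id,
        custom_llm_provider="openai",
    )
    print("created batch:", batch.id)


asyncio.run(main())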
Diff of one of the changed test files:

@@ -23,7 +23,7 @@ model_val = None
 def test_completion_with_no_model():
     # test on empty
-    with pytest.raises(ValueError):
+    with pytest.raises(TypeError):
         response = completion(messages=messages)



@@ -36,39 +36,6 @@ def test_completion_with_empty_model():
         pass


-# def test_completion_catch_nlp_exception():
-#     TEMP commented out NLP cloud API is unstable
-#     try:
-#         response = completion(model="dolphin", messages=messages, functions=[
-#             {
-#                 "name": "get_current_weather",
-#                 "description": "Get the current weather in a given location",
-#                 "parameters": {
-#                     "type": "object",
-#                     "properties": {
-#                         "location": {
-#                             "type": "string",
-#                             "description": "The city and state, e.g. San Francisco, CA"
-#                         },
-#                         "unit": {
-#                             "type": "string",
-#                             "enum": ["celsius", "fahrenheit"]
-#                         }
-#                     },
-#                     "required": ["location"]
-#                 }
-#             }
-#         ])
-
-#     except Exception as e:
-#         if "Function calling is not supported by nlp_cloud" in str(e):
-#             pass
-#         else:
-#             pytest.fail(f'An error occurred {e}')
-
-# test_completion_catch_nlp_exception()
-
-
 def test_completion_invalid_param_cohere():
     try:
         litellm.set_verbose = True
@@ -94,9 +61,6 @@ def test_completion_function_call_cohere():
         pass


-# test_completion_function_call_cohere()
-
-
 def test_completion_function_call_openai():
     try:
         messages = [{"role": "user", "content": "What is the weather like in Boston?"}]
@@ -140,17 +104,3 @@ def test_completion_with_no_provider():
     except Exception as e:
         print(f"error occurred: {e}")
         pass
-
-
-# test_completion_with_no_provider()
-# # bad key
-# temp_key = os.environ.get("OPENAI_API_KEY")
-# os.environ["OPENAI_API_KEY"] = "bad-key"
-# # test on openai completion call
-# try:
-#     response = completion(model="gpt-3.5-turbo", messages=messages)
-#     print(f"response: {response}")
-# except Exception:
-#     print(f"error occurred: {traceback.format_exc()}")
-#     pass
-# os.environ["OPENAI_API_KEY"] = str(temp_key) # this passes linting#5
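
One way to observe the cost calculation referenced by the "batch endpoints add cost calc" and DD logging bullets is litellm's documented CustomLogger callback, which receives a response_cost in its kwargs on success events. The sketch below is an assumption about how the new tracking could surface; whether batch-creation events reach this hook with a populated cost depends on this commit's changes, and the CostTracker class is illustrative only, not code from the PR.

# Sketch: observe per-request cost via litellm's CustomLogger callback.
import litellm
from litellm.integrations.custom_logger import CustomLogger


class CostTracker(CustomLogger):
    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # "response_cost" is populated by litellm's cost tracking for successful
        # calls; per this commit, /batches creation is intended to be included.
        print("response_cost:", kwargs.get("response_cost"))


litellm.callbacks = [CostTracker()]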