From f967d6af5ec3088eb9bcf515d0998e129ed09a10 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Tue, 24 Oct 2023 15:54:14 -0700
Subject: [PATCH] fix(init.py): fix linting errors

---
 litellm/__init__.py          |  2 +-
 litellm/tests/test_utils.py  |  2 --
 litellm/tests/test_vertex.py | 52 ------------------------------------
 3 files changed, 1 insertion(+), 55 deletions(-)
 delete mode 100644 litellm/tests/test_vertex.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index ecc1603ce4..29560a1aab 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -47,7 +47,7 @@ model_fallbacks: Optional[List] = None
 model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 #############################################
 
-def get_model_cost_map(url: Optional[str]=None):
+def get_model_cost_map(url: str):
     try:
         response = requests.get(url)
         response.raise_for_status()  # Raise an exception if request is unsuccessful
diff --git a/litellm/tests/test_utils.py b/litellm/tests/test_utils.py
index c0cdc8ad48..0e325b7381 100644
--- a/litellm/tests/test_utils.py
+++ b/litellm/tests/test_utils.py
@@ -39,8 +39,6 @@ def test_multiple_messages_trimming():
         {"role": "user", "content": "This is another long message that will also exceed the limit."}
     ]
     trimmed_messages = trim_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=20)
-    print("Trimmed messages")
-    print(trimmed_messages)
     # print(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo"))
     assert(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo")) <= 20
 # test_multiple_messages_trimming()
diff --git a/litellm/tests/test_vertex.py b/litellm/tests/test_vertex.py
deleted file mode 100644
index 01088ec893..0000000000
--- a/litellm/tests/test_vertex.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# import sys, os
-# import traceback
-# from dotenv import load_dotenv
-# load_dotenv()
-# import os
-# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
-# import pytest
-# import litellm
-# from litellm import embedding, completion
-
-
-# litellm.vertex_project = "hardy-device-386718"
-# litellm.vertex_location = "us-central1"
-# litellm.set_verbose = True
-
-# user_message = "what's the weather in SF "
-# messages = [{ "content": user_message,"role": "user"}]
-# def logger_fn(user_model_dict):
-#     print(f"user_model_dict: {user_model_dict}")
-
-# # chat-bison
-# # response = completion(model="chat-bison", messages=messages, temperature=0.5, top_p=0.1)
-# # print(response)
-
-# #text-bison
-
-# response = completion(model="text-bison", messages=messages)
-# print(response)
-
-# response = completion(model="text-bison@001", messages=messages, temperature=0.1, logger_fn=logger_fn)
-# print(response)
-
-# response = completion(model="text-bison", messages=messages, temperature=0.4, top_p=0.1, logger_fn=logger_fn)
-# print(response)
-
-# response = completion(model="text-bison", messages=messages, temperature=0.8, top_p=0.4, top_k=30, logger_fn=logger_fn)
-# print(response)
-
-# response = completion(model="text-bison@001", messages=messages, temperature=0.8, top_p=0.4, top_k=30, logger_fn=logger_fn)
-# print(response)
-
-# # chat_model = ChatModel.from_pretrained("chat-bison@001")
-# # parameters = {
-# #     "temperature": 0.2,
-# #     "max_output_tokens": 256,
-# #     "top_p": 0.8,
-# #     "top_k": 40
-# # }
-
-# # chat = chat_model.start_chat()
-# # response = chat.send_message("who are u? write a sentence", **parameters)
-# # print(f"Response from Model: {response.text}")
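
Note on the litellm/__init__.py hunk: get_model_cost_map no longer defaults url to None, so callers must now pass the URL explicitly, in practice the model_cost_map_url defined just above the function. The sketch below shows the resulting call pattern; only the first lines of the function body appear in the hunk, so the response.json() parsing, the empty-dict fallback, and the model_cost variable name are illustrative assumptions, not the function's confirmed behavior.

import requests

model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"

def get_model_cost_map(url: str):
    try:
        response = requests.get(url)
        response.raise_for_status()  # Raise an exception if request is unsuccessful
        return response.json()       # assumption: the remote cost map is parsed as JSON
    except Exception:
        return {}                    # assumption: fall back to an empty map on failure

# With the default removed, the call site supplies the URL itself (hypothetical name):
model_cost = get_model_cost_map(url=model_cost_map_url)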