fix(init.py): fix linting errors

Krrish Dholakia 2023-10-24 15:54:14 -07:00
parent b68f083e10
commit f967d6af5e
3 changed files with 1 addition and 55 deletions


@@ -47,7 +47,7 @@ model_fallbacks: Optional[List] = None
 model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 #############################################
-def get_model_cost_map(url: Optional[str]=None):
+def get_model_cost_map(url: str):
     try:
         response = requests.get(url)
         response.raise_for_status()  # Raise an exception if request is unsuccessful
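
The one-line change above is the lint fix the commit message refers to: with url: Optional[str]=None, the body's requests.get(url) can receive None, which a type checker rejects. After the change, callers supply the URL explicitly. A minimal sketch of the resulting pattern, assuming the truncated except branch simply falls back (the fallback value and the call site here are assumptions, not part of the diff):

    import requests

    model_cost_map_url: str = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"

    def get_model_cost_map(url: str):
        # url is always a str now, so requests.get(url) type-checks cleanly
        try:
            response = requests.get(url)
            response.raise_for_status()  # raise if the request is unsuccessful
            return response.json()
        except Exception:
            return {}  # assumed fallback; the diff truncates the except branch

    # assumed call site: the module-level default is passed explicitly
    model_cost = get_model_cost_map(url=model_cost_map_url)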


@@ -39,8 +39,6 @@ def test_multiple_messages_trimming():
         {"role": "user", "content": "This is another long message that will also exceed the limit."}
     ]
     trimmed_messages = trim_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=20)
-    print("Trimmed messages")
-    print(trimmed_messages)
     # print(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo"))
     assert(get_token_count(messages=trimmed_messages, model="gpt-3.5-turbo")) <= 20
 # test_multiple_messages_trimming()
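
The two removed lines were leftover debug prints; the assertion that follows them is the actual check. A minimal sketch of what the surviving test exercises, assuming the utilities are importable from litellm.utils as below (the first message's content is illustrative, since the hunk only shows the second one):

    from litellm.utils import trim_messages, get_token_count  # assumed import path

    messages = [
        {"role": "user", "content": "This is a long message that definitely exceeds the token limit."},
        {"role": "user", "content": "This is another long message that will also exceed the limit."},
    ]

    # trim_messages drops or truncates messages until the conversation fits max_tokens
    trimmed = trim_messages(messages=messages, model="gpt-3.5-turbo", max_tokens=20)
    assert get_token_count(messages=trimmed, model="gpt-3.5-turbo") <= 20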


@@ -1,52 +0,0 @@
-# import sys, os
-# import traceback
-# from dotenv import load_dotenv
-# load_dotenv()
-# import os
-# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
-# import pytest
-# import litellm
-# from litellm import embedding, completion
-# litellm.vertex_project = "hardy-device-386718"
-# litellm.vertex_location = "us-central1"
-# litellm.set_verbose = True
-# user_message = "what's the weather in SF "
-# messages = [{ "content": user_message,"role": "user"}]
-# def logger_fn(user_model_dict):
-#     print(f"user_model_dict: {user_model_dict}")
-# # chat-bison
-# # response = completion(model="chat-bison", messages=messages, temperature=0.5, top_p=0.1)
-# # print(response)
-# #text-bison
-# response = completion(model="text-bison", messages=messages)
-# print(response)
-# response = completion(model="text-bison@001", messages=messages, temperature=0.1, logger_fn=logger_fn)
-# print(response)
-# response = completion(model="text-bison", messages=messages, temperature=0.4, top_p=0.1, logger_fn=logger_fn)
-# print(response)
-# response = completion(model="text-bison", messages=messages, temperature=0.8, top_p=0.4, top_k=30, logger_fn=logger_fn)
-# print(response)
-# response = completion(model="text-bison@001", messages=messages, temperature=0.8, top_p=0.4, top_k=30, logger_fn=logger_fn)
-# print(response)
-# # chat_model = ChatModel.from_pretrained("chat-bison@001")
-# # parameters = {
-# #     "temperature": 0.2,
-# #     "max_output_tokens": 256,
-# #     "top_p": 0.8,
-# #     "top_k": 40
-# # }
-# # chat = chat_model.start_chat()
-# # response = chat.send_message("who are u? write a sentence", **parameters)
-# # print(f"Response from Model: {response.text}")