forked from phoenix/litellm-mirror

(fix) pre commit hook to sync backup context_window mapping

parent 8b571159fc
commit 70f36073dc

3 changed files with 15 additions and 22 deletions
@@ -1,39 +1,32 @@
 import sys
 import filecmp
-import difflib
-
-
-def show_diff(file1, file2):
-    with open(file1, "r") as f1, open(file2, "r") as f2:
-        lines1 = f1.readlines()
-        lines2 = f2.readlines()
-
-    diff = difflib.unified_diff(lines1, lines2, lineterm="")
-
-    for line in diff:
-        print(line)
+import shutil
 
 
 def main(argv=None):
     print(
-        "comparing model_prices_and_context_window, and litellm/model_prices_and_context_window_backup.json files.......... checking they match",
-        argv,
+        "Comparing model_prices_and_context_window and litellm/model_prices_and_context_window_backup.json files... checking if they match."
     )
 
     file1 = "model_prices_and_context_window.json"
     file2 = "litellm/model_prices_and_context_window_backup.json"
 
     cmp_result = filecmp.cmp(file1, file2, shallow=False)
 
     if cmp_result:
-        print(f"Passed ! Files {file1} and {file2} match.")
+        print(f"Passed! Files {file1} and {file2} match.")
         return 0
     else:
-        # show the diff
-        print(f"Failed ! Files {file1} and {file2} do not match.")
-        print("\nDiff")
-        show_diff(file1, file2)
+        print(
+            f"Failed! Files {file1} and {file2} do not match. Copying content from {file1} to {file2}."
+        )
+        copy_content(file1, file2)
         return 1
 
 
+def copy_content(source, destination):
+    shutil.copy2(source, destination)
+
+
 if __name__ == "__main__":
     sys.exit(main())
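The updated hook drops the difflib-based show_diff() helper and instead relies on two standard-library calls: filecmp.cmp(..., shallow=False) for a byte-by-byte comparison, and shutil.copy2() to overwrite the backup when the files diverge. A minimal, self-contained sketch of that compare-then-copy behavior, using throwaway temp files invented for illustration:

import filecmp
import os
import shutil
import tempfile

workdir = tempfile.mkdtemp()
source = os.path.join(workdir, "source.json")  # stand-in for model_prices_and_context_window.json
backup = os.path.join(workdir, "backup.json")  # stand-in for the litellm/ backup copy

with open(source, "w") as f:
    f.write('{"litellm_provider": "openai"}\n')
with open(backup, "w") as f:
    f.write('{"litellm_provider": "opeai"}\n')  # backup has drifted out of sync

# shallow=False compares file contents byte by byte instead of just
# the os.stat() signature (type, size, mtime).
assert not filecmp.cmp(source, backup, shallow=False)

# copy2 copies contents and metadata (mtime etc.), mirroring the new
# copy_content() helper in the hook.
shutil.copy2(source, backup)
assert filecmp.cmp(source, backup, shallow=False)
print("backup re-synced")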
@@ -5,7 +5,7 @@
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.00003,
         "output_cost_per_token": 0.00006,
-        "litellm_provider": "openai",
+        "litellm_provider": "opeai",
         "mode": "chat"
     },
     "gpt-4-0314": {
@@ -5,7 +5,7 @@
         "max_output_tokens": 4096,
         "input_cost_per_token": 0.00003,
         "output_cost_per_token": 0.00006,
-        "litellm_provider": "openai",
+        "litellm_provider": "opeai",
         "mode": "chat"
     },
     "gpt-4-0314": {
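Both JSON hunks apply the same one-line provider edit, one to model_prices_and_context_window.json and one to its mirror litellm/model_prices_and_context_window_backup.json, which is the invariant the hook enforces: the two mappings must stay byte-identical. Note that main() returns 1 even after a successful copy, so a pre-commit run that triggers the sync still fails once, and the freshly overwritten backup has to be re-staged. A sketch of driving the check by hand; the script path used here is an assumption, since this diff does not show the file name:

# Hypothetical manual run; "ci_cd/check_files_match.py" is an assumed path,
# not one shown in this diff.
import subprocess
import sys

result = subprocess.run([sys.executable, "ci_cd/check_files_match.py"])
if result.returncode == 0:
    print("Files already match; nothing to do.")
else:
    # The hook just overwrote the backup with the source mapping, so the
    # updated backup must be staged and committed again.
    print("Backup was re-synced; re-stage litellm/model_prices_and_context_window_backup.json.")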