From 059a161c283e7ab0f4a4d040aaaadc39e81dbd29 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Tue, 9 Jul 2024 13:38:33 -0700 Subject: [PATCH 1/6] style: initial commit --- litellm/tests/test_anthropic_completion.py | 4203 ++++++++++++++++++++ 1 file changed, 4203 insertions(+) create mode 100644 litellm/tests/test_anthropic_completion.py diff --git a/litellm/tests/test_anthropic_completion.py b/litellm/tests/test_anthropic_completion.py new file mode 100644 index 0000000000..674d090762 --- /dev/null +++ b/litellm/tests/test_anthropic_completion.py @@ -0,0 +1,4203 @@ +# What is this? +## Unit tests for Anthropic Adapter + +# import asyncio +# import os +# import sys +# import traceback + +# from dotenv import load_dotenv + +# load_dotenv() +# import io +# import os + +# sys.path.insert( +# 0, os.path.abspath("../..") +# ) # Adds the parent directory to the system path +# from unittest.mock import MagicMock, patch + +# import pytest + +# import litellm +# from litellm import ( +# RateLimitError, +# TextCompletionResponse, +# atext_completion, +# completion, +# completion_cost, +# embedding, +# text_completion, +# ) + +# litellm.num_retries = 3 + + +# token_prompt = [ +# [ +# 32, +# 2043, +# 32, +# 329, +# 4585, +# 262, +# 1644, +# 14, +# 34, +# 3705, +# 319, +# 616, +# 47551, +# 30, +# 930, +# 19219, +# 284, +# 1949, +# 284, +# 787, +# 428, +# 355, +# 1790, +# 355, +# 1744, +# 981, +# 1390, +# 3307, +# 2622, +# 13, +# 220, +# 198, +# 198, +# 40, +# 423, +# 587, +# 351, +# 616, +# 41668, +# 32682, +# 329, +# 718, +# 812, +# 13, +# 376, +# 666, +# 32682, +# 468, +# 281, +# 4697, +# 6621, +# 11, +# 356, +# 1183, +# 869, +# 607, +# 25737, +# 11, +# 508, +# 318, +# 2579, +# 290, +# 468, +# 257, +# 642, +# 614, +# 1468, +# 1200, +# 13, +# 314, +# 373, +# 612, +# 262, +# 1110, +# 25737, +# 373, +# 287, +# 4827, +# 290, +# 14801, +# 373, +# 4642, +# 11, +# 673, +# 318, +# 616, +# 41803, +# 13, +# 2399, +# 2104, +# 1641, +# 468, +# 6412, +# 284, +# 502, +# 355, +# 465, +# 38074, +# 494, +# 1201, +# 1110, +# 352, +# 13, +# 314, +# 716, +# 407, +# 2910, +# 475, +# 356, +# 389, +# 1641, +# 11, +# 673, +# 3848, +# 502, +# 38074, +# 494, +# 290, +# 356, +# 423, +# 3993, +# 13801, +# 11, +# 26626, +# 11864, +# 11, +# 3503, +# 13, +# 220, +# 198, +# 198, +# 17, +# 812, +# 2084, +# 25737, +# 373, +# 287, +# 14321, +# 422, +# 2563, +# 13230, +# 13, +# 21051, +# 11, +# 2356, +# 25542, +# 11, +# 290, +# 47482, +# 897, +# 547, +# 607, +# 1517, +# 13, +# 1375, +# 550, +# 257, +# 5110, +# 14608, +# 290, +# 262, +# 1641, +# 7723, +# 1637, +# 284, +# 3758, +# 607, +# 284, +# 14321, +# 290, +# 477, +# 8389, +# 257, +# 7269, +# 284, +# 1011, +# 1337, +# 286, +# 14801, +# 13, +# 383, +# 5156, +# 338, +# 9955, +# 11, +# 25737, +# 338, +# 13850, +# 11, +# 468, +# 257, +# 47973, +# 14, +# 9979, +# 2762, +# 1693, +# 290, +# 373, +# 503, +# 286, +# 3240, +# 329, +# 362, +# 1933, +# 523, +# 339, +# 2492, +# 470, +# 612, +# 329, +# 477, +# 286, +# 428, +# 13, +# 220, +# 198, +# 198, +# 3347, +# 10667, +# 5223, +# 503, +# 706, +# 513, +# 1528, +# 11, +# 23630, +# 673, +# 373, +# 366, +# 38125, +# 290, +# 655, +# 2622, +# 257, +# 3338, +# 8399, +# 1911, +# 314, +# 2298, +# 607, +# 510, +# 11, +# 1011, +# 607, +# 284, +# 607, +# 2156, +# 11, +# 290, +# 673, +# 3393, +# 2925, +# 284, +# 7523, +# 20349, +# 290, +# 4144, +# 257, +# 6099, +# 13, +# 314, +# 836, +# 470, +# 892, +# 20349, +# 318, +# 257, +# 2563, +# 290, +# 716, +# 845, +# 386, +# 12, +# 66, +# 1236, +# 571, +# 292, +# 3584, +# 314, +# 836, +# 470, +# 7523, +# 
11, +# 475, +# 326, +# 373, +# 407, +# 5035, +# 6402, +# 314, +# 655, +# 6497, +# 607, +# 510, +# 422, +# 14321, +# 13, +# 220, +# 198, +# 198, +# 32, +# 1285, +# 1568, +# 673, +# 373, +# 6294, +# 329, +# 3013, +# 24707, +# 287, +# 262, +# 12436, +# 1539, +# 819, +# 5722, +# 329, +# 852, +# 604, +# 1933, +# 2739, +# 11, +# 39398, +# 607, +# 1097, +# 5059, +# 981, +# 1029, +# 290, +# 318, +# 852, +# 16334, +# 329, +# 720, +# 1120, +# 74, +# 422, +# 15228, +# 278, +# 656, +# 257, +# 2156, +# 11, +# 290, +# 373, +# 12165, +# 503, +# 286, +# 376, +# 666, +# 32682, +# 338, +# 584, +# 6621, +# 338, +# 2156, +# 329, +# 32012, +# 262, +# 14595, +# 373, +# 30601, +# 510, +# 290, +# 2491, +# 357, +# 7091, +# 373, +# 1029, +# 8, +# 290, +# 262, +# 2104, +# 34624, +# 373, +# 46432, +# 1268, +# 1961, +# 422, +# 1660, +# 2465, +# 780, +# 8168, +# 2073, +# 1625, +# 1363, +# 329, +# 807, +# 2250, +# 13, +# 720, +# 1238, +# 11, +# 830, +# 286, +# 2465, +# 290, +# 5875, +# 5770, +# 511, +# 2156, +# 5096, +# 5017, +# 340, +# 13, +# 220, +# 198, +# 198, +# 2504, +# 373, +# 477, +# 938, +# 614, +# 13, +# 1119, +# 1053, +# 587, +# 287, +# 511, +# 649, +# 2156, +# 319, +# 511, +# 898, +# 329, +# 546, +# 718, +# 1933, +# 13, +# 554, +# 3389, +# 673, +# 1444, +# 34020, +# 290, +# 531, +# 511, +# 8744, +# 373, +# 4423, +# 572, +# 780, +# 673, +# 1422, +# 470, +# 423, +# 262, +# 1637, +# 780, +# 41646, +# 338, +# 37751, +# 1392, +# 32621, +# 510, +# 290, +# 1422, +# 470, +# 467, +# 832, +# 13, +# 679, +# 3432, +# 511, +# 2739, +# 8744, +# 9024, +# 492, +# 257, +# 2472, +# 286, +# 720, +# 4059, +# 13, +# 314, +# 1807, +# 340, +# 373, +# 13678, +# 306, +# 5789, +# 475, +# 4030, +# 616, +# 5422, +# 4423, +# 13, +# 1439, +# 468, +# 587, +# 5897, +# 1201, +# 13, +# 220, +# 198, +# 198, +# 7571, +# 2745, +# 2084, +# 11, +# 673, +# 1965, +# 502, +# 284, +# 8804, +# 617, +# 1637, +# 284, +# 651, +# 38464, +# 329, +# 399, +# 8535, +# 13, +# 3226, +# 1781, +# 314, +# 1101, +# 407, +# 1016, +# 284, +# 1309, +# 616, +# 41803, +# 393, +# 6621, +# 467, +# 14720, +# 11, +# 645, +# 2300, +# 644, +# 318, +# 1016, +# 319, +# 4306, +# 11, +# 523, +# 314, +# 910, +# 314, +# 1183, +# 307, +# 625, +# 379, +# 642, +# 13, +# 314, +# 1392, +# 572, +# 670, +# 1903, +# 290, +# 651, +# 612, +# 379, +# 362, +# 25, +# 2231, +# 13, +# 314, +# 1282, +# 287, +# 1262, +# 616, +# 13952, +# 1994, +# 11, +# 2513, +# 287, +# 11, +# 766, +# 399, +# 8535, +# 2712, +# 351, +# 36062, +# 287, +# 262, +# 5228, +# 11, +# 25737, +# 3804, +# 503, +# 319, +# 262, +# 18507, +# 11, +# 290, +# 16914, +# 319, +# 262, +# 6891, +# 3084, +# 13, +# 8989, +# 2406, +# 422, +# 257, +# 1641, +# 47655, +# 351, +# 13230, +# 11, +# 314, +# 760, +# 644, +# 16914, +# 3073, +# 588, +# 13, +# 314, +# 836, +# 470, +# 760, +# 703, +# 881, +# 340, +# 373, +# 11, +# 475, +# 314, +# 714, +# 423, +# 23529, +# 276, +# 340, +# 510, +# 290, +# 5901, +# 616, +# 18057, +# 351, +# 340, +# 13, +# 314, +# 6810, +# 19772, +# 2024, +# 8347, +# 287, +# 262, +# 2166, +# 2119, +# 290, +# 399, +# 8535, +# 373, +# 287, +# 3294, +# 11685, +# 286, +# 8242, +# 290, +# 607, +# 7374, +# 15224, +# 13, +# 383, +# 4894, +# 373, +# 572, +# 13, +# 383, +# 2156, +# 373, +# 3863, +# 2319, +# 37, +# 532, +# 340, +# 373, +# 1542, +# 2354, +# 13, +# 220, +# 198, +# 198, +# 40, +# 1718, +# 399, +# 8535, +# 284, +# 616, +# 1097, +# 11, +# 290, +# 1444, +# 16679, +# 329, +# 281, +# 22536, +# 355, +# 314, +# 373, +# 12008, +# 25737, +# 373, +# 14904, +# 2752, +# 13, +# 220, +# 314, +# 1422, +# 470, +# 765, +# 284, +# 
10436, +# 290, +# 22601, +# 503, +# 399, +# 8535, +# 523, +# 314, +# 9658, +# 287, +# 262, +# 1097, +# 290, +# 1309, +# 607, +# 711, +# 319, +# 616, +# 3072, +# 1566, +# 262, +# 22536, +# 5284, +# 13, +# 3226, +# 1781, +# 1644, +# 290, +# 32084, +# 3751, +# 510, +# 355, +# 880, +# 13, +# 314, +# 4893, +# 262, +# 3074, +# 290, +# 780, +# 399, +# 8535, +# 338, +# 9955, +# 318, +# 503, +# 286, +# 3240, +# 1762, +# 11, +# 34020, +# 14, +# 44, +# 4146, +# 547, +# 1444, +# 13, +# 1649, +# 484, +# 5284, +# 484, +# 547, +# 5897, +# 290, +# 4692, +# 11, +# 1422, +# 470, +# 1107, +# 1561, +# 11, +# 1718, +# 399, +# 8535, +# 11, +# 290, +# 1297, +# 502, +# 284, +# 467, +# 1363, +# 13, +# 220, +# 198, +# 198, +# 2025, +# 1711, +# 1568, +# 314, +# 651, +# 1363, +# 290, +# 41668, +# 32682, +# 7893, +# 502, +# 644, +# 314, +# 1053, +# 1760, +# 13, +# 314, +# 4893, +# 2279, +# 284, +# 683, +# 290, +# 477, +# 339, +# 550, +# 373, +# 8993, +# 329, +# 502, +# 13, +# 18626, +# 262, +# 2104, +# 1641, +# 1541, +# 2993, +# 290, +# 547, +# 28674, +# 379, +# 502, +# 329, +# 644, +# 314, +# 550, +# 1760, +# 13, +# 18626, +# 314, +# 373, +# 366, +# 448, +# 286, +# 1627, +# 290, +# 8531, +# 1, +# 780, +# 314, +# 1444, +# 16679, +# 878, +# 4379, +# 611, +# 673, +# 373, +# 1682, +# 31245, +# 6, +# 278, +# 780, +# 340, +# 2900, +# 503, +# 673, +# 373, +# 655, +# 47583, +# 503, +# 422, +# 262, +# 16914, +# 13, +# 775, +# 8350, +# 329, +# 2250, +# 290, +# 314, +# 1364, +# 290, +# 3377, +# 262, +# 1755, +# 379, +# 616, +# 1266, +# 1545, +# 338, +# 2156, +# 290, +# 16896, +# 477, +# 1755, +# 13, +# 314, +# 3521, +# 470, +# 5412, +# 340, +# 477, +# 523, +# 314, +# 2900, +# 616, +# 3072, +# 572, +# 290, +# 3088, +# 284, +# 8960, +# 290, +# 655, +# 9480, +# 866, +# 13, +# 2011, +# 1266, +# 1545, +# 373, +# 510, +# 477, +# 1755, +# 351, +# 502, +# 11, +# 5149, +# 502, +# 314, +# 750, +# 2147, +# 2642, +# 11, +# 290, +# 314, +# 1101, +# 8788, +# 13, +# 220, +# 198, +# 198, +# 40, +# 1210, +# 616, +# 3072, +# 319, +# 290, +# 314, +# 550, +# 6135, +# 13399, +# 14, +# 37348, +# 1095, +# 13, +# 31515, +# 11, +# 34020, +# 11, +# 47551, +# 11, +# 41668, +# 32682, +# 11, +# 290, +# 511, +# 7083, +# 1641, +# 1866, +# 24630, +# 502, +# 13, +# 1119, +# 389, +# 2282, +# 314, +# 20484, +# 607, +# 1204, +# 11, +# 20484, +# 399, +# 8535, +# 338, +# 1204, +# 11, +# 925, +# 2279, +# 517, +# 8253, +# 621, +# 340, +# 2622, +# 284, +# 307, +# 11, +# 925, +# 340, +# 1171, +# 618, +# 340, +# 373, +# 257, +# 366, +# 17989, +# 14669, +# 1600, +# 290, +# 20484, +# 25737, +# 338, +# 8395, +# 286, +# 1683, +# 1972, +# 20750, +# 393, +# 1719, +# 10804, +# 286, +# 607, +# 1200, +# 757, +# 11, +# 4844, +# 286, +# 606, +# 1683, +# 765, +# 284, +# 766, +# 502, +# 757, +# 290, +# 314, +# 481, +# 1239, +# 766, +# 616, +# 41803, +# 757, +# 11, +# 290, +# 484, +# 765, +# 502, +# 284, +# 1414, +# 329, +# 25737, +# 338, +# 7356, +# 6314, +# 290, +# 20889, +# 502, +# 329, +# 262, +# 32084, +# 1339, +# 290, +# 7016, +# 12616, +# 13, +# 198, +# 198, +# 40, +# 716, +# 635, +# 783, +# 2060, +# 13, +# 1406, +# 319, +# 1353, +# 286, +# 6078, +# 616, +# 1266, +# 1545, +# 286, +# 838, +# 812, +# 357, +# 69, +# 666, +# 32682, +# 828, +# 314, +# 481, +# 4425, +# 616, +# 7962, +# 314, +# 550, +# 351, +# 683, +# 11, +# 644, +# 314, +# 3177, +# 616, +# 1641, +# 11, +# 290, +# 616, +# 399, +# 8535, +# 13, +# 198, +# 198, +# 40, +# 4988, +# 1254, +# 12361, +# 13, +# 314, +# 423, +# 12361, +# 9751, +# 284, +# 262, +# 966, +# 810, +# 314, +# 1101, +# 7960, +# 2130, +# 318, +# 1016, 
+# 284, +# 1282, +# 651, +# 366, +# 260, +# 18674, +# 1, +# 319, +# 502, +# 329, +# 644, +# 314, +# 750, +# 13, +# 314, +# 460, +# 470, +# 4483, +# 13, +# 314, +# 423, +# 2626, +# 767, +# 8059, +# 422, +# 340, +# 13, +# 314, +# 1101, +# 407, +# 11029, +# 329, +# 7510, +# 13, +# 314, +# 423, +# 11668, +# 739, +# 616, +# 2951, +# 13, +# 314, +# 1053, +# 550, +# 807, +# 50082, +# 12, +# 12545, +# 287, +# 734, +# 2745, +# 13, +# 1629, +# 717, +# 314, +# 2936, +# 523, +# 6563, +# 287, +# 616, +# 2551, +# 475, +# 355, +# 262, +# 1528, +# 467, +# 416, +# 314, +# 1101, +# 3612, +# 3863, +# 484, +# 547, +# 826, +# 290, +# 314, +# 815, +# 423, +# 10667, +# 319, +# 607, +# 878, +# 4585, +# 16679, +# 290, +# 852, +# 5306, +# 3019, +# 992, +# 13, +# 314, +# 836, +# 470, +# 1337, +# 546, +# 25737, +# 7471, +# 11, +# 475, +# 314, +# 750, +# 18344, +# 257, +# 642, +# 614, +# 1468, +# 1200, +# 1497, +# 422, +# 607, +# 3397, +# 290, +# 314, +# 1254, +# 12361, +# 546, +# 340, +# 13, +# 314, +# 760, +# 2130, +# 287, +# 262, +# 1641, +# 481, +# 1011, +# 607, +# 287, +# 11, +# 475, +# 340, +# 338, +# 1239, +# 588, +# 852, +# 351, +# 534, +# 3397, +# 13, +# 1375, +# 481, +# 1663, +# 510, +# 20315, +# 278, +# 502, +# 329, +# 340, +# 290, +# 477, +# 314, +# 1053, +# 1683, +# 1760, +# 318, +# 1842, +# 607, +# 355, +# 616, +# 898, +# 13, +# 220, +# 198, +# 198, +# 22367, +# 11, +# 317, +# 2043, +# 32, +# 30, +# 4222, +# 1037, +# 502, +# 13, +# 383, +# 14934, +# 318, +# 6600, +# 502, +# 6776, +# 13, +# 220, +# 198, +# 24361, +# 25, +# 1148, +# 428, +# 2642, +# 30, +# 198, +# 33706, +# 25, +# 645, +# ], +# [ +# 32, +# 2043, +# 32, +# 329, +# 4585, +# 262, +# 1644, +# 14, +# 34, +# 3705, +# 319, +# 616, +# 47551, +# 30, +# 930, +# 19219, +# 284, +# 1949, +# 284, +# 787, +# 428, +# 355, +# 1790, +# 355, +# 1744, +# 981, +# 1390, +# 3307, +# 2622, +# 13, +# 220, +# 198, +# 198, +# 40, +# 423, +# 587, +# 351, +# 616, +# 41668, +# 32682, +# 329, +# 718, +# 812, +# 13, +# 376, +# 666, +# 32682, +# 468, +# 281, +# 4697, +# 6621, +# 11, +# 356, +# 1183, +# 869, +# 607, +# 25737, +# 11, +# 508, +# 318, +# 2579, +# 290, +# 468, +# 257, +# 642, +# 614, +# 1468, +# 1200, +# 13, +# 314, +# 373, +# 612, +# 262, +# 1110, +# 25737, +# 373, +# 287, +# 4827, +# 290, +# 14801, +# 373, +# 4642, +# 11, +# 673, +# 318, +# 616, +# 41803, +# 13, +# 2399, +# 2104, +# 1641, +# 468, +# 6412, +# 284, +# 502, +# 355, +# 465, +# 38074, +# 494, +# 1201, +# 1110, +# 352, +# 13, +# 314, +# 716, +# 407, +# 2910, +# 475, +# 356, +# 389, +# 1641, +# 11, +# 673, +# 3848, +# 502, +# 38074, +# 494, +# 290, +# 356, +# 423, +# 3993, +# 13801, +# 11, +# 26626, +# 11864, +# 11, +# 3503, +# 13, +# 220, +# 198, +# 198, +# 17, +# 812, +# 2084, +# 25737, +# 373, +# 287, +# 14321, +# 422, +# 2563, +# 13230, +# 13, +# 21051, +# 11, +# 2356, +# 25542, +# 11, +# 290, +# 47482, +# 897, +# 547, +# 607, +# 1517, +# 13, +# 1375, +# 550, +# 257, +# 5110, +# 14608, +# 290, +# 262, +# 1641, +# 7723, +# 1637, +# 284, +# 3758, +# 607, +# 284, +# 14321, +# 290, +# 477, +# 8389, +# 257, +# 7269, +# 284, +# 1011, +# 1337, +# 286, +# 14801, +# 13, +# 383, +# 5156, +# 338, +# 9955, +# 11, +# 25737, +# 338, +# 13850, +# 11, +# 468, +# 257, +# 47973, +# 14, +# 9979, +# 2762, +# 1693, +# 290, +# 373, +# 503, +# 286, +# 3240, +# 329, +# 362, +# 1933, +# 523, +# 339, +# 2492, +# 470, +# 612, +# 329, +# 477, +# 286, +# 428, +# 13, +# 220, +# 198, +# 198, +# 3347, +# 10667, +# 5223, +# 503, +# 706, +# 513, +# 1528, +# 11, +# 23630, +# 673, +# 373, +# 366, +# 38125, +# 290, +# 655, +# 
2622, +# 257, +# 3338, +# 8399, +# 1911, +# 314, +# 2298, +# 607, +# 510, +# 11, +# 1011, +# 607, +# 284, +# 607, +# 2156, +# 11, +# 290, +# 673, +# 3393, +# 2925, +# 284, +# 7523, +# 20349, +# 290, +# 4144, +# 257, +# 6099, +# 13, +# 314, +# 836, +# 470, +# 892, +# 20349, +# 318, +# 257, +# 2563, +# 290, +# 716, +# 845, +# 386, +# 12, +# 66, +# 1236, +# 571, +# 292, +# 3584, +# 314, +# 836, +# 470, +# 7523, +# 11, +# 475, +# 326, +# 373, +# 407, +# 5035, +# 6402, +# 314, +# 655, +# 6497, +# 607, +# 510, +# 422, +# 14321, +# 13, +# 220, +# 198, +# 198, +# 32, +# 1285, +# 1568, +# 673, +# 373, +# 6294, +# 329, +# 3013, +# 24707, +# 287, +# 262, +# 12436, +# 1539, +# 819, +# 5722, +# 329, +# 852, +# 604, +# 1933, +# 2739, +# 11, +# 39398, +# 607, +# 1097, +# 5059, +# 981, +# 1029, +# 290, +# 318, +# 852, +# 16334, +# 329, +# 720, +# 1120, +# 74, +# 422, +# 15228, +# 278, +# 656, +# 257, +# 2156, +# 11, +# 290, +# 373, +# 12165, +# 503, +# 286, +# 376, +# 666, +# 32682, +# 338, +# 584, +# 6621, +# 338, +# 2156, +# 329, +# 32012, +# 262, +# 14595, +# 373, +# 30601, +# 510, +# 290, +# 2491, +# 357, +# 7091, +# 373, +# 1029, +# 8, +# 290, +# 262, +# 2104, +# 34624, +# 373, +# 46432, +# 1268, +# 1961, +# 422, +# 1660, +# 2465, +# 780, +# 8168, +# 2073, +# 1625, +# 1363, +# 329, +# 807, +# 2250, +# 13, +# 720, +# 1238, +# 11, +# 830, +# 286, +# 2465, +# 290, +# 5875, +# 5770, +# 511, +# 2156, +# 5096, +# 5017, +# 340, +# 13, +# 220, +# 198, +# 198, +# 2504, +# 373, +# 477, +# 938, +# 614, +# 13, +# 1119, +# 1053, +# 587, +# 287, +# 511, +# 649, +# 2156, +# 319, +# 511, +# 898, +# 329, +# 546, +# 718, +# 1933, +# 13, +# 554, +# 3389, +# 673, +# 1444, +# 34020, +# 290, +# 531, +# 511, +# 8744, +# 373, +# 4423, +# 572, +# 780, +# 673, +# 1422, +# 470, +# 423, +# 262, +# 1637, +# 780, +# 41646, +# 338, +# 37751, +# 1392, +# 32621, +# 510, +# 290, +# 1422, +# 470, +# 467, +# 832, +# 13, +# 679, +# 3432, +# 511, +# 2739, +# 8744, +# 9024, +# 492, +# 257, +# 2472, +# 286, +# 720, +# 4059, +# 13, +# 314, +# 1807, +# 340, +# 373, +# 13678, +# 306, +# 5789, +# 475, +# 4030, +# 616, +# 5422, +# 4423, +# 13, +# 1439, +# 468, +# 587, +# 5897, +# 1201, +# 13, +# 220, +# 198, +# 198, +# 7571, +# 2745, +# 2084, +# 11, +# 673, +# 1965, +# 502, +# 284, +# 8804, +# 617, +# 1637, +# 284, +# 651, +# 38464, +# 329, +# 399, +# 8535, +# 13, +# 3226, +# 1781, +# 314, +# 1101, +# 407, +# 1016, +# 284, +# 1309, +# 616, +# 41803, +# 393, +# 6621, +# 467, +# 14720, +# 11, +# 645, +# 2300, +# 644, +# 318, +# 1016, +# 319, +# 4306, +# 11, +# 523, +# 314, +# 910, +# 314, +# 1183, +# 307, +# 625, +# 379, +# 642, +# 13, +# 314, +# 1392, +# 572, +# 670, +# 1903, +# 290, +# 651, +# 612, +# 379, +# 362, +# 25, +# 2231, +# 13, +# 314, +# 1282, +# 287, +# 1262, +# 616, +# 13952, +# 1994, +# 11, +# 2513, +# 287, +# 11, +# 766, +# 399, +# 8535, +# 2712, +# 351, +# 36062, +# 287, +# 262, +# 5228, +# 11, +# 25737, +# 3804, +# 503, +# 319, +# 262, +# 18507, +# 11, +# 290, +# 16914, +# 319, +# 262, +# 6891, +# 3084, +# 13, +# 8989, +# 2406, +# 422, +# 257, +# 1641, +# 47655, +# 351, +# 13230, +# 11, +# 314, +# 760, +# 644, +# 16914, +# 3073, +# 588, +# 13, +# 314, +# 836, +# 470, +# 760, +# 703, +# 881, +# 340, +# 373, +# 11, +# 475, +# 314, +# 714, +# 423, +# 23529, +# 276, +# 340, +# 510, +# 290, +# 5901, +# 616, +# 18057, +# 351, +# 340, +# 13, +# 314, +# 6810, +# 19772, +# 2024, +# 8347, +# 287, +# 262, +# 2166, +# 2119, +# 290, +# 399, +# 8535, +# 373, +# 287, +# 3294, +# 11685, +# 286, +# 8242, +# 290, +# 607, +# 7374, +# 15224, +# 13, 
+# 383, +# 4894, +# 373, +# 572, +# 13, +# 383, +# 2156, +# 373, +# 3863, +# 2319, +# 37, +# 532, +# 340, +# 373, +# 1542, +# 2354, +# 13, +# 220, +# 198, +# 198, +# 40, +# 1718, +# 399, +# 8535, +# 284, +# 616, +# 1097, +# 11, +# 290, +# 1444, +# 16679, +# 329, +# 281, +# 22536, +# 355, +# 314, +# 373, +# 12008, +# 25737, +# 373, +# 14904, +# 2752, +# 13, +# 220, +# 314, +# 1422, +# 470, +# 765, +# 284, +# 10436, +# 290, +# 22601, +# 503, +# 399, +# 8535, +# 523, +# 314, +# 9658, +# 287, +# 262, +# 1097, +# 290, +# 1309, +# 607, +# 711, +# 319, +# 616, +# 3072, +# 1566, +# 262, +# 22536, +# 5284, +# 13, +# 3226, +# 1781, +# 1644, +# 290, +# 32084, +# 3751, +# 510, +# 355, +# 880, +# 13, +# 314, +# 4893, +# 262, +# 3074, +# 290, +# 780, +# 399, +# 8535, +# 338, +# 9955, +# 318, +# 503, +# 286, +# 3240, +# 1762, +# 11, +# 34020, +# 14, +# 44, +# 4146, +# 547, +# 1444, +# 13, +# 1649, +# 484, +# 5284, +# 484, +# 547, +# 5897, +# 290, +# 4692, +# 11, +# 1422, +# 470, +# 1107, +# 1561, +# 11, +# 1718, +# 399, +# 8535, +# 11, +# 290, +# 1297, +# 502, +# 284, +# 467, +# 1363, +# 13, +# 220, +# 198, +# 198, +# 2025, +# 1711, +# 1568, +# 314, +# 651, +# 1363, +# 290, +# 41668, +# 32682, +# 7893, +# 502, +# 644, +# 314, +# 1053, +# 1760, +# 13, +# 314, +# 4893, +# 2279, +# 284, +# 683, +# 290, +# 477, +# 339, +# 550, +# 373, +# 8993, +# 329, +# 502, +# 13, +# 18626, +# 262, +# 2104, +# 1641, +# 1541, +# 2993, +# 290, +# 547, +# 28674, +# 379, +# 502, +# 329, +# 644, +# 314, +# 550, +# 1760, +# 13, +# 18626, +# 314, +# 373, +# 366, +# 448, +# 286, +# 1627, +# 290, +# 8531, +# 1, +# 780, +# 314, +# 1444, +# 16679, +# 878, +# 4379, +# 611, +# 673, +# 373, +# 1682, +# 31245, +# 6, +# 278, +# 780, +# 340, +# 2900, +# 503, +# 673, +# 373, +# 655, +# 47583, +# 503, +# 422, +# 262, +# 16914, +# 13, +# 775, +# 8350, +# 329, +# 2250, +# 290, +# 314, +# 1364, +# 290, +# 3377, +# 262, +# 1755, +# 379, +# 616, +# 1266, +# 1545, +# 338, +# 2156, +# 290, +# 16896, +# 477, +# 1755, +# 13, +# 314, +# 3521, +# 470, +# 5412, +# 340, +# 477, +# 523, +# 314, +# 2900, +# 616, +# 3072, +# 572, +# 290, +# 3088, +# 284, +# 8960, +# 290, +# 655, +# 9480, +# 866, +# 13, +# 2011, +# 1266, +# 1545, +# 373, +# 510, +# 477, +# 1755, +# 351, +# 502, +# 11, +# 5149, +# 502, +# 314, +# 750, +# 2147, +# 2642, +# 11, +# 290, +# 314, +# 1101, +# 8788, +# 13, +# 220, +# 198, +# 198, +# 40, +# 1210, +# 616, +# 3072, +# 319, +# 290, +# 314, +# 550, +# 6135, +# 13399, +# 14, +# 37348, +# 1095, +# 13, +# 31515, +# 11, +# 34020, +# 11, +# 47551, +# 11, +# 41668, +# 32682, +# 11, +# 290, +# 511, +# 7083, +# 1641, +# 1866, +# 24630, +# 502, +# 13, +# 1119, +# 389, +# 2282, +# 314, +# 20484, +# 607, +# 1204, +# 11, +# 20484, +# 399, +# 8535, +# 338, +# 1204, +# 11, +# 925, +# 2279, +# 517, +# 8253, +# 621, +# 340, +# 2622, +# 284, +# 307, +# 11, +# 925, +# 340, +# 1171, +# 618, +# 340, +# 373, +# 257, +# 366, +# 17989, +# 14669, +# 1600, +# 290, +# 20484, +# 25737, +# 338, +# 8395, +# 286, +# 1683, +# 1972, +# 20750, +# 393, +# 1719, +# 10804, +# 286, +# 607, +# 1200, +# 757, +# 11, +# 4844, +# 286, +# 606, +# 1683, +# 765, +# 284, +# 766, +# 502, +# 757, +# 290, +# 314, +# 481, +# 1239, +# 766, +# 616, +# 41803, +# 757, +# 11, +# 290, +# 484, +# 765, +# 502, +# 284, +# 1414, +# 329, +# 25737, +# 338, +# 7356, +# 6314, +# 290, +# 20889, +# 502, +# 329, +# 262, +# 32084, +# 1339, +# 290, +# 7016, +# 12616, +# 13, +# 198, +# 198, +# 40, +# 716, +# 635, +# 783, +# 2060, +# 13, +# 1406, +# 319, +# 1353, +# 286, +# 6078, +# 616, +# 1266, +# 1545, 
+# 286, +# 838, +# 812, +# 357, +# 69, +# 666, +# 32682, +# 828, +# 314, +# 481, +# 4425, +# 616, +# 7962, +# 314, +# 550, +# 351, +# 683, +# 11, +# 644, +# 314, +# 3177, +# 616, +# 1641, +# 11, +# 290, +# 616, +# 399, +# 8535, +# 13, +# 198, +# 198, +# 40, +# 4988, +# 1254, +# 12361, +# 13, +# 314, +# 423, +# 12361, +# 9751, +# 284, +# 262, +# 966, +# 810, +# 314, +# 1101, +# 7960, +# 2130, +# 318, +# 1016, +# 284, +# 1282, +# 651, +# 366, +# 260, +# 18674, +# 1, +# 319, +# 502, +# 329, +# 644, +# 314, +# 750, +# 13, +# 314, +# 460, +# 470, +# 4483, +# 13, +# 314, +# 423, +# 2626, +# 767, +# 8059, +# 422, +# 340, +# 13, +# 314, +# 1101, +# 407, +# 11029, +# 329, +# 7510, +# 13, +# 314, +# 423, +# 11668, +# 739, +# 616, +# 2951, +# 13, +# 314, +# 1053, +# 550, +# 807, +# 50082, +# 12, +# 12545, +# 287, +# 734, +# 2745, +# 13, +# 1629, +# 717, +# 314, +# 2936, +# 523, +# 6563, +# 287, +# 616, +# 2551, +# 475, +# 355, +# 262, +# 1528, +# 467, +# 416, +# 314, +# 1101, +# 3612, +# 3863, +# 484, +# 547, +# 826, +# 290, +# 314, +# 815, +# 423, +# 10667, +# 319, +# 607, +# 878, +# 4585, +# 16679, +# 290, +# 852, +# 5306, +# 3019, +# 992, +# 13, +# 314, +# 836, +# 470, +# 1337, +# 546, +# 25737, +# 7471, +# 11, +# 475, +# 314, +# 750, +# 18344, +# 257, +# 642, +# 614, +# 1468, +# 1200, +# 1497, +# 422, +# 607, +# 3397, +# 290, +# 314, +# 1254, +# 12361, +# 546, +# 340, +# 13, +# 314, +# 760, +# 2130, +# 287, +# 262, +# 1641, +# 481, +# 1011, +# 607, +# 287, +# 11, +# 475, +# 340, +# 338, +# 1239, +# 588, +# 852, +# 351, +# 534, +# 3397, +# 13, +# 1375, +# 481, +# 1663, +# 510, +# 20315, +# 278, +# 502, +# 329, +# 340, +# 290, +# 477, +# 314, +# 1053, +# 1683, +# 1760, +# 318, +# 1842, +# 607, +# 355, +# 616, +# 898, +# 13, +# 220, +# 198, +# 198, +# 22367, +# 11, +# 317, +# 2043, +# 32, +# 30, +# 4222, +# 1037, +# 502, +# 13, +# 383, +# 14934, +# 318, +# 6600, +# 502, +# 6776, +# 13, +# 220, +# 198, +# 24361, +# 25, +# 1148, +# 428, +# 2642, +# 30, +# 198, +# 33706, +# 25, +# 3763, +# ], +# ] + + +# def test_unit_test_text_completion_object(): +# openai_object = { +# "id": "cmpl-99y7B2svVoRWe1xd7UFRmeGjZrFSh", +# "choices": [ +# { +# "finish_reason": "length", +# "index": 0, +# "logprobs": { +# "text_offset": [101], +# "token_logprobs": [-0.00023488728], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00023488728, +# "1": -8.375235, +# "zero": -14.101797, +# "__": -14.554922, +# "00": -14.98461, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 1, +# "logprobs": { +# "text_offset": [116], +# "token_logprobs": [-0.013745008], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.013745008, +# "1": -4.294995, +# "00": -12.287183, +# "2": -12.771558, +# "3": -14.013745, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 2, +# "logprobs": { +# "text_offset": [108], +# "token_logprobs": [-3.655073e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -3.655073e-5, +# "1": -10.656286, +# "__": -11.789099, +# "false": -12.984411, +# "00": -14.039099, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 3, +# "logprobs": { +# "text_offset": [106], +# "token_logprobs": [-0.1345946], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.1345946, +# "1": -2.0720947, +# "2": -12.798657, +# "false": -13.970532, +# "00": -14.27522, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 4, +# "logprobs": { +# "text_offset": [95], +# "token_logprobs": 
[-0.10491652], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.10491652, +# "1": -2.3236666, +# "2": -7.0111666, +# "3": -7.987729, +# "4": -9.050229, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 5, +# "logprobs": { +# "text_offset": [121], +# "token_logprobs": [-0.00026300468], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00026300468, +# "1": -8.250263, +# "zero": -14.976826, +# " ": -15.461201, +# "000": -15.773701, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 6, +# "logprobs": { +# "text_offset": [146], +# "token_logprobs": [-5.085517e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -5.085517e-5, +# "1": -9.937551, +# "000": -13.929738, +# "__": -14.968801, +# "zero": -15.070363, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 7, +# "logprobs": { +# "text_offset": [100], +# "token_logprobs": [-0.13875218], +# "tokens": ["1"], +# "top_logprobs": [ +# { +# "1": -0.13875218, +# "0": -2.0450022, +# "2": -9.7559395, +# "3": -11.1465645, +# "4": -11.5528145, +# } +# ], +# }, +# "text": "1", +# }, +# { +# "finish_reason": "length", +# "index": 8, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0005573204], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0005573204, +# "1": -7.6099324, +# "3": -10.070869, +# "2": -11.617744, +# " ": -12.859932, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 9, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0018747397], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0018747397, +# "1": -6.29875, +# "3": -11.2675, +# "4": -11.634687, +# "2": -11.822187, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 10, +# "logprobs": { +# "text_offset": [110], +# "token_logprobs": [-0.003476763], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.003476763, +# "1": -5.6909766, +# "__": -10.526915, +# "None": -10.925352, +# "False": -11.88629, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 11, +# "logprobs": { +# "text_offset": [106], +# "token_logprobs": [-0.00032962486], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00032962486, +# "1": -8.03158, +# "__": -13.445642, +# "2": -13.828455, +# "zero": -15.453455, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 12, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-9.984788e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -9.984788e-5, +# "1": -9.21885, +# " ": -14.836038, +# "zero": -16.265724, +# "00": -16.578224, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 13, +# "logprobs": { +# "text_offset": [106], +# "token_logprobs": [-0.0010039895], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0010039895, +# "1": -6.907254, +# "2": -13.743192, +# "false": -15.227567, +# "3": -15.297879, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 14, +# "logprobs": { +# "text_offset": [106], +# "token_logprobs": [-0.0005681643], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0005681643, +# "1": -7.5005684, +# "__": -11.836506, +# "zero": -13.242756, +# "file": -13.445881, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 15, +# "logprobs": { +# "text_offset": [146], +# "token_logprobs": [-3.9769227e-5], +# "tokens": 
["0"], +# "top_logprobs": [ +# { +# "0": -3.9769227e-5, +# "1": -10.15629, +# "000": -15.078165, +# "00": -15.664103, +# "zero": -16.015665, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 16, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0006509595], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0006509595, +# "1": -7.344401, +# "2": -13.352214, +# " ": -13.852214, +# "3": -14.680339, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 17, +# "logprobs": { +# "text_offset": [103], +# "token_logprobs": [-0.0093299495], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0093299495, +# "1": -4.681205, +# "2": -11.173392, +# "3": -13.439017, +# "00": -14.673392, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 18, +# "logprobs": { +# "text_offset": [130], +# "token_logprobs": [-0.00024382756], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00024382756, +# "1": -8.328369, +# " ": -13.640869, +# "zero": -14.859619, +# "null": -16.51587, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 19, +# "logprobs": { +# "text_offset": [107], +# "token_logprobs": [-0.0006452414], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0006452414, +# "1": -7.36002, +# "00": -12.328771, +# "000": -12.961583, +# "2": -14.211583, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 20, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0012751155], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0012751155, +# "1": -6.67315, +# "__": -11.970025, +# "<|endoftext|>": -14.907525, +# "3": -14.930963, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 21, +# "logprobs": { +# "text_offset": [107], +# "token_logprobs": [-7.1954215e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -7.1954215e-5, +# "1": -9.640697, +# "00": -13.500072, +# "000": -13.523509, +# "__": -13.945384, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 22, +# "logprobs": { +# "text_offset": [108], +# "token_logprobs": [-0.0032367748], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0032367748, +# "1": -5.737612, +# "<|endoftext|>": -13.940737, +# "2": -14.167299, +# "00": -14.292299, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 23, +# "logprobs": { +# "text_offset": [117], +# "token_logprobs": [-0.00018673266], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00018673266, +# "1": -8.593937, +# "zero": -15.179874, +# "null": -15.515812, +# "None": -15.851749, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 24, +# "logprobs": { +# "text_offset": [104], +# "token_logprobs": [-0.0010223285], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0010223285, +# "1": -6.8916473, +# "__": -13.05571, +# "00": -14.071335, +# "zero": -14.235397, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 25, +# "logprobs": { +# "text_offset": [108], +# "token_logprobs": [-0.0038979414], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0038979414, +# "1": -5.550773, +# "2": -13.160148, +# "00": -14.144523, +# "3": -14.41796, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 26, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.00074721366], +# 
"tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00074721366, +# "1": -7.219497, +# "3": -11.430435, +# "2": -13.367935, +# " ": -13.735123, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 27, +# "logprobs": { +# "text_offset": [146], +# "token_logprobs": [-8.566264e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -8.566264e-5, +# "1": -9.375086, +# "000": -15.359461, +# "__": -15.671961, +# "00": -15.679773, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 28, +# "logprobs": { +# "text_offset": [119], +# "token_logprobs": [-0.000274683], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.000274683, +# "1": -8.2034, +# "00": -14.898712, +# "2": -15.633087, +# "__": -16.844025, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 29, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.014869375], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.014869375, +# "1": -4.217994, +# "2": -11.63987, +# "3": -11.944557, +# "5": -12.26487, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 30, +# "logprobs": { +# "text_offset": [110], +# "token_logprobs": [-0.010907865], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.010907865, +# "1": -4.5265326, +# "2": -11.440596, +# "<|endoftext|>": -12.456221, +# "file": -13.049971, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 31, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.00070528337], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00070528337, +# "1": -7.2663302, +# "6": -13.141331, +# "2": -13.797581, +# "3": -13.836643, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 32, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0004983439], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0004983439, +# "1": -7.6098733, +# "3": -14.211436, +# "2": -14.336436, +# " ": -15.117686, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 33, +# "logprobs": { +# "text_offset": [110], +# "token_logprobs": [-3.6908343e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -3.6908343e-5, +# "1": -10.250037, +# "00": -14.2266, +# "__": -14.7266, +# "000": -16.164099, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 34, +# "logprobs": { +# "text_offset": [104], +# "token_logprobs": [-0.003917157], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.003917157, +# "1": -5.550792, +# "2": -11.355479, +# "00": -12.777354, +# "3": -13.652354, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 35, +# "logprobs": { +# "text_offset": [146], +# "token_logprobs": [-5.0139948e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -5.0139948e-5, +# "1": -9.921926, +# "000": -14.851613, +# "00": -15.414113, +# "zero": -15.687551, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 36, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0005143099], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0005143099, +# "1": -7.5786395, +# " ": -14.406764, +# "00": -14.570827, +# "999": -14.633327, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 37, +# "logprobs": { +# "text_offset": [103], +# "token_logprobs": [-0.00013691289], +# "tokens": ["0"], +# 
"top_logprobs": [ +# { +# "0": -0.00013691289, +# "1": -8.968887, +# "__": -12.547012, +# "zero": -13.57045, +# "00": -13.8517, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 38, +# "logprobs": { +# "text_offset": [103], +# "token_logprobs": [-0.00032569113], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00032569113, +# "1": -8.047201, +# "2": -13.570639, +# "zero": -14.023764, +# "false": -14.726889, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 39, +# "logprobs": { +# "text_offset": [113], +# "token_logprobs": [-3.7146747e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -3.7146747e-5, +# "1": -10.203162, +# "zero": -18.437536, +# "2": -20.117224, +# " zero": -20.210974, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 40, +# "logprobs": { +# "text_offset": [110], +# "token_logprobs": [-7.4695905e-5], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -7.4695905e-5, +# "1": -9.515699, +# "00": -14.836012, +# "__": -16.093824, +# "file": -16.468824, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 41, +# "logprobs": { +# "text_offset": [111], +# "token_logprobs": [-0.02289473], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.02289473, +# "1": -3.7885196, +# "2": -12.499457, +# "3": -14.546332, +# "00": -15.66352, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 42, +# "logprobs": { +# "text_offset": [108], +# "token_logprobs": [-0.0011367622], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0011367622, +# "1": -6.782387, +# "2": -13.493324, +# "00": -15.071449, +# "zero": -15.727699, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 43, +# "logprobs": { +# "text_offset": [115], +# "token_logprobs": [-0.0006384541], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0006384541, +# "1": -7.3600135, +# "00": -14.0397005, +# "2": -14.4303255, +# "000": -15.563138, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 44, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0007382771], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0007382771, +# "1": -7.219488, +# "4": -13.516363, +# "2": -13.555426, +# "3": -13.602301, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 45, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0014242834], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0014242834, +# "1": -6.5639243, +# "2": -12.493611, +# "__": -12.712361, +# "3": -12.884236, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 46, +# "logprobs": { +# "text_offset": [111], +# "token_logprobs": [-0.00017088225], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00017088225, +# "1": -8.765796, +# "zero": -12.695483, +# "__": -12.804858, +# "time": -12.882983, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 47, +# "logprobs": { +# "text_offset": [146], +# "token_logprobs": [-0.000107238506], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.000107238506, +# "1": -9.171982, +# "000": -13.648544, +# "__": -14.531357, +# "zero": -14.586044, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 48, +# "logprobs": { +# "text_offset": [106], +# "token_logprobs": [-0.0028172398], +# "tokens": ["0"], +# 
"top_logprobs": [ +# { +# "0": -0.0028172398, +# "1": -5.877817, +# "00": -12.16688, +# "2": -12.487192, +# "000": -14.182505, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 49, +# "logprobs": { +# "text_offset": [104], +# "token_logprobs": [-0.00043460296], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.00043460296, +# "1": -7.7816844, +# "00": -13.570747, +# "2": -13.60981, +# "__": -13.789497, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 50, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0046973573], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0046973573, +# "1": -5.3640723, +# "null": -14.082823, +# " ": -14.707823, +# "2": -14.746885, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 51, +# "logprobs": { +# "text_offset": [100], +# "token_logprobs": [-0.2487161], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.2487161, +# "1": -1.5143411, +# "2": -9.037779, +# "3": -10.100279, +# "4": -10.756529, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 52, +# "logprobs": { +# "text_offset": [108], +# "token_logprobs": [-0.0011751055], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0011751055, +# "1": -6.751175, +# " ": -13.73555, +# "2": -15.258987, +# "3": -15.399612, +# } +# ], +# }, +# "text": "0", +# }, +# { +# "finish_reason": "length", +# "index": 53, +# "logprobs": { +# "text_offset": [143], +# "token_logprobs": [-0.0012339224], +# "tokens": ["0"], +# "top_logprobs": [ +# { +# "0": -0.0012339224, +# "1": -6.719984, +# "6": -11.430922, +# "3": -12.165297, +# "2": -12.696547, +# } +# ], +# }, +# "text": "0", +# }, +# ], +# "created": 1712163061, +# "model": "ft:babbage-002:ai-r-d-zapai:v3-fields-used:84jb9rtr", +# "object": "text_completion", +# "system_fingerprint": None, +# "usage": {"completion_tokens": 54, "prompt_tokens": 1877, "total_tokens": 1931}, +# } + +# text_completion_obj = TextCompletionResponse(**openai_object) + +# ## WRITE UNIT TESTS FOR TEXT_COMPLETION_OBJECT +# assert text_completion_obj.id == "cmpl-99y7B2svVoRWe1xd7UFRmeGjZrFSh" +# assert text_completion_obj.object == "text_completion" +# assert text_completion_obj.created == 1712163061 +# assert ( +# text_completion_obj.model +# == "ft:babbage-002:ai-r-d-zapai:v3-fields-used:84jb9rtr" +# ) +# assert text_completion_obj.system_fingerprint == None +# assert len(text_completion_obj.choices) == len(openai_object["choices"]) + +# # TEST FIRST CHOICE # +# first_text_completion_obj = text_completion_obj.choices[0] +# assert first_text_completion_obj.index == 0 +# assert first_text_completion_obj.logprobs.text_offset == [101] +# assert first_text_completion_obj.logprobs.tokens == ["0"] +# assert first_text_completion_obj.logprobs.token_logprobs == [-0.00023488728] +# assert len(first_text_completion_obj.logprobs.top_logprobs) == len( +# openai_object["choices"][0]["logprobs"]["top_logprobs"] +# ) +# assert first_text_completion_obj.text == "0" +# assert first_text_completion_obj.finish_reason == "length" + +# # TEST SECOND CHOICE # +# second_text_completion_obj = text_completion_obj.choices[1] +# assert second_text_completion_obj.index == 1 +# assert second_text_completion_obj.logprobs.text_offset == [116] +# assert second_text_completion_obj.logprobs.tokens == ["0"] +# assert second_text_completion_obj.logprobs.token_logprobs == [-0.013745008] +# assert len(second_text_completion_obj.logprobs.top_logprobs) == 
len( +# openai_object["choices"][0]["logprobs"]["top_logprobs"] +# ) +# assert second_text_completion_obj.text == "0" +# assert second_text_completion_obj.finish_reason == "length" + +# # TEST LAST CHOICE # +# last_text_completion_obj = text_completion_obj.choices[-1] +# assert last_text_completion_obj.index == 53 +# assert last_text_completion_obj.logprobs.text_offset == [143] +# assert last_text_completion_obj.logprobs.tokens == ["0"] +# assert last_text_completion_obj.logprobs.token_logprobs == [-0.0012339224] +# assert len(last_text_completion_obj.logprobs.top_logprobs) == len( +# openai_object["choices"][0]["logprobs"]["top_logprobs"] +# ) +# assert last_text_completion_obj.text == "0" +# assert last_text_completion_obj.finish_reason == "length" + +# assert text_completion_obj.usage.completion_tokens == 54 +# assert text_completion_obj.usage.prompt_tokens == 1877 +# assert text_completion_obj.usage.total_tokens == 1931 + + +# def test_completion_openai_prompt(): +# try: +# print("\n text 003 test\n") +# response = text_completion( +# model="gpt-3.5-turbo-instruct", +# prompt=["What's the weather in SF?", "How is Manchester?"], +# ) +# print(response) +# assert len(response.choices) == 2 +# response_str = response["choices"][0]["text"] +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# # test_completion_openai_prompt() + + +# def test_completion_openai_engine_and_model(): +# try: +# print("\n text 003 test\n") +# litellm.set_verbose = True +# response = text_completion( +# model="gpt-3.5-turbo-instruct", +# engine="anything", +# prompt="What's the weather in SF?", +# max_tokens=5, +# ) +# print(response) +# response_str = response["choices"][0]["text"] +# # print(response.choices[0]) +# # print(response.choices[0].text) +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# # test_completion_openai_engine_and_model() + + +# def test_completion_openai_engine(): +# try: +# print("\n text 003 test\n") +# litellm.set_verbose = True +# response = text_completion( +# engine="gpt-3.5-turbo-instruct", +# prompt="What's the weather in SF?", +# max_tokens=5, +# ) +# print(response) +# response_str = response["choices"][0]["text"] +# # print(response.choices[0]) +# # print(response.choices[0].text) +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# # test_completion_openai_engine() + + +# def test_completion_chatgpt_prompt(): +# try: +# print("\n gpt3.5 test\n") +# response = text_completion( +# model="gpt-3.5-turbo", prompt="What's the weather in SF?" 
+# ) +# print(response) +# response_str = response["choices"][0]["text"] +# print("\n", response.choices) +# print("\n", response.choices[0]) +# # print(response.choices[0].text) +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# # test_completion_chatgpt_prompt() + + +# def test_text_completion_basic(): +# try: +# print("\n test 003 with logprobs \n") +# litellm.set_verbose = False +# response = text_completion( +# model="gpt-3.5-turbo-instruct", +# prompt="good morning", +# max_tokens=10, +# logprobs=10, +# ) +# print(response) +# print(response.choices) +# print(response.choices[0]) +# # print(response.choices[0].text) +# response_str = response["choices"][0]["text"] +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# # test_text_completion_basic() + + +# def test_completion_text_003_prompt_array(): +# try: +# litellm.set_verbose = False +# response = text_completion( +# model="gpt-3.5-turbo-instruct", +# prompt=token_prompt, # token prompt is a 2d list +# ) +# print("\n\n response") + +# print(response) +# # response_str = response["choices"][0]["text"] +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# # test_completion_text_003_prompt_array() + + +# # not including this in our ci cd pipeline, since we don't want to fail tests due to an unstable replit +# # def test_text_completion_with_proxy(): +# # try: +# # litellm.set_verbose=True +# # response = text_completion( +# # model="facebook/opt-125m", +# # prompt='Write a tagline for a traditional bavarian tavern', +# # api_base="https://openai-proxy.berriai.repl.co/v1", +# # custom_llm_provider="openai", +# # temperature=0, +# # max_tokens=10, +# # ) +# # print("\n\n response") + +# # print(response) +# # except Exception as e: +# # pytest.fail(f"Error occurred: {e}") +# # test_text_completion_with_proxy() + + +# ##### hugging face tests +# def test_completion_hf_prompt_array(): +# try: +# litellm.set_verbose = True +# print("\n testing hf mistral\n") +# response = text_completion( +# model="huggingface/mistralai/Mistral-7B-v0.1", +# prompt=token_prompt, # token prompt is a 2d list, +# max_tokens=0, +# temperature=0.0, +# # echo=True, # hugging face inference api is currently raising errors for this, looks like they have a regression on their side +# ) +# print("\n\n response") + +# print(response) +# print(response.choices) +# assert len(response.choices) == 2 +# # response_str = response["choices"][0]["text"] +# except Exception as e: +# print(str(e)) +# if "is currently loading" in str(e): +# return +# if "Service Unavailable" in str(e): +# return +# pytest.fail(f"Error occurred: {e}") + + +# # test_completion_hf_prompt_array() + + +# def test_text_completion_stream(): +# try: +# response = text_completion( +# model="huggingface/mistralai/Mistral-7B-v0.1", +# prompt="good morning", +# stream=True, +# max_tokens=10, +# ) +# for chunk in response: +# print(f"chunk: {chunk}") +# except Exception as e: +# pytest.fail(f"GOT exception for HF In streaming{e}") + + +# # test_text_completion_stream() + +# # async def test_text_completion_async_stream(): +# # try: +# # response = await atext_completion( +# # model="text-completion-openai/gpt-3.5-turbo-instruct", +# # prompt="good morning", +# # stream=True, +# # max_tokens=10, +# # ) +# # async for chunk in response: +# # print(f"chunk: {chunk}") +# # except Exception as e: +# # pytest.fail(f"GOT exception for HF In streaming{e}") + +# # asyncio.run(test_text_completion_async_stream()) + + +# def test_async_text_completion(): +# 
litellm.set_verbose = True +# print("test_async_text_completion") + +# async def test_get_response(): +# try: +# response = await litellm.atext_completion( +# model="gpt-3.5-turbo-instruct", +# prompt="good morning", +# stream=False, +# max_tokens=10, +# ) +# print(f"response: {response}") +# except litellm.Timeout as e: +# print(e) +# except Exception as e: +# print(e) + +# asyncio.run(test_get_response()) + + +# @pytest.mark.skip(reason="Skip flaky tgai test") +# def test_async_text_completion_together_ai(): +# litellm.set_verbose = True +# print("test_async_text_completion") + +# async def test_get_response(): +# try: +# response = await litellm.atext_completion( +# model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1", +# prompt="good morning", +# max_tokens=10, +# ) +# print(f"response: {response}") +# except litellm.Timeout as e: +# print(e) +# except Exception as e: +# pytest.fail("An unexpected error occurred") + +# asyncio.run(test_get_response()) + + +# # test_async_text_completion() + + +# def test_async_text_completion_stream(): +# # tests atext_completion + streaming - assert only one finish reason sent +# litellm.set_verbose = False +# print("test_async_text_completion with stream") + +# async def test_get_response(): +# try: +# response = await litellm.atext_completion( +# model="gpt-3.5-turbo-instruct", +# prompt="good morning", +# stream=True, +# ) +# print(f"response: {response}") + +# num_finish_reason = 0 +# async for chunk in response: +# print(chunk) +# if chunk["choices"][0].get("finish_reason") is not None: +# num_finish_reason += 1 +# print("finish_reason", chunk["choices"][0].get("finish_reason")) + +# assert ( +# num_finish_reason == 1 +# ), f"expected only one finish reason. Got {num_finish_reason}" +# except Exception as e: +# pytest.fail(f"GOT exception for gpt-3.5 instruct In streaming{e}") + +# asyncio.run(test_get_response()) + + +# # test_async_text_completion_stream() + + +# @pytest.mark.asyncio +# async def test_async_text_completion_chat_model_stream(): +# try: +# response = await litellm.atext_completion( +# model="gpt-3.5-turbo", +# prompt="good morning", +# stream=True, +# max_tokens=10, +# ) + +# num_finish_reason = 0 +# chunks = [] +# async for chunk in response: +# print(chunk) +# chunks.append(chunk) +# if chunk["choices"][0].get("finish_reason") is not None: +# num_finish_reason += 1 + +# assert ( +# num_finish_reason == 1 +# ), f"expected only one finish reason. 
Got {num_finish_reason}" +# response_obj = litellm.stream_chunk_builder(chunks=chunks) +# cost = litellm.completion_cost(completion_response=response_obj) +# assert cost > 0 +# except Exception as e: +# pytest.fail(f"GOT exception for gpt-3.5 In streaming{e}") + + +# # asyncio.run(test_async_text_completion_chat_model_stream()) + + +# @pytest.mark.asyncio +# async def test_completion_codestral_fim_api(): +# try: +# litellm.set_verbose = True +# import logging + +# from litellm._logging import verbose_logger + +# verbose_logger.setLevel(level=logging.DEBUG) +# response = await litellm.atext_completion( +# model="text-completion-codestral/codestral-2405", +# prompt="def is_odd(n): \n return n % 2 == 1 \ndef test_is_odd():", +# suffix="return True", +# temperature=0, +# top_p=1, +# max_tokens=10, +# min_tokens=10, +# seed=10, +# stop=["return"], +# ) +# # Add any assertions here to check the response +# print(response) + +# assert response.choices[0].text is not None +# assert len(response.choices[0].text) > 0 + +# # cost = litellm.completion_cost(completion_response=response) +# # print("cost to make mistral completion=", cost) +# # assert cost > 0.0 +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# @pytest.mark.asyncio +# async def test_completion_codestral_fim_api_stream(): +# try: +# import logging + +# from litellm._logging import verbose_logger + +# litellm.set_verbose = False + +# # verbose_logger.setLevel(level=logging.DEBUG) +# response = await litellm.atext_completion( +# model="text-completion-codestral/codestral-2405", +# prompt="def is_odd(n): \n return n % 2 == 1 \ndef test_is_odd():", +# suffix="return True", +# temperature=0, +# top_p=1, +# stream=True, +# seed=10, +# stop=["return"], +# ) + +# full_response = "" +# # Add any assertions here to check the response +# async for chunk in response: +# print(chunk) +# full_response += chunk.get("choices")[0].get("text") or "" + +# print("full_response", full_response) + +# assert len(full_response) > 2 # we at least have a few chars in response :) + +# # cost = litellm.completion_cost(completion_response=response) +# # print("cost to make mistral completion=", cost) +# # assert cost > 0.0 +# except Exception as e: +# pytest.fail(f"Error occurred: {e}") + + +# def mock_post(*args, **kwargs): +# mock_response = MagicMock() +# mock_response.status_code = 200 +# mock_response.headers = {"Content-Type": "application/json"} +# mock_response.model_dump.return_value = { +# "id": "cmpl-7a59383dd4234092b9e5d652a7ab8143", +# "object": "text_completion", +# "created": 1718824735, +# "model": "Sao10K/L3-70B-Euryale-v2.1", +# "choices": [ +# { +# "index": 0, +# "text": ") might be faster than then answering, and the added time it takes for the", +# "logprobs": None, +# "finish_reason": "length", +# "stop_reason": None, +# } +# ], +# "usage": {"prompt_tokens": 2, "total_tokens": 18, "completion_tokens": 16}, +# } +# return mock_response + + +# def test_completion_vllm(): +# """ +# Asserts a text completion call for vllm actually goes to the text completion endpoint +# """ +# from openai import OpenAI + +# client = OpenAI(api_key="my-fake-key") + +# with patch.object(client.completions, "create", side_effect=mock_post) as mock_call: +# response = text_completion( +# model="openai/gemini-1.5-flash", prompt="ping", client=client, hello="world" +# ) +# print(response) + +# assert response.usage.prompt_tokens == 2 + +# mock_call.assert_called_once() + +# assert "hello" in mock_call.call_args.kwargs["extra_body"] From 
01a335b4c38a709f8b491b78eca3456124ae3298 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Wed, 10 Jul 2024 00:32:28 -0700
Subject: [PATCH 2/6] feat(anthropic_adapter.py): support for translating
 anthropic params to openai format

---
 litellm/__init__.py                        |    7 +-
 litellm/adapters/anthropic_adapter.py      |   43 +
 litellm/integrations/custom_logger.py      |   24 +
 litellm/llms/anthropic.py                  |  223 ++
 litellm/main.py                            |   28 +
 litellm/proxy/_new_secret_config.yaml      |   11 -
 litellm/tests/test_anthropic_completion.py | 4214 +-------------------
 litellm/types/adapter.py                   |   10 +
 litellm/types/llms/anthropic.py            |   86 +-
 litellm/types/llms/openai.py               |  109 +-
 10 files changed, 542 insertions(+), 4213 deletions(-)
 create mode 100644 litellm/adapters/anthropic_adapter.py
 create mode 100644 litellm/types/adapter.py

diff --git a/litellm/__init__.py b/litellm/__init__.py
index f0dab5e29e..ce98febcd8 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -364,7 +364,7 @@ for key, value in model_cost.items():
     elif value.get("litellm_provider") == "mistral":
         mistral_chat_models.append(key)
     elif value.get("litellm_provider") == "anthropic":
-        anthropic_models.append(key)
+        anthropic_models.append(key)
     elif value.get("litellm_provider") == "empower":
         empower_models.append(key)
     elif value.get("litellm_provider") == "openrouter":
@@ -881,3 +881,8 @@ from .assistants.main import *
 from .batches.main import *
 from .scheduler import *
 from .cost_calculator import response_cost_calculator, cost_per_token
+
+### ADAPTERS ###
+from .types.adapter import AdapterItem
+
+adapters: List[AdapterItem] = []
diff --git a/litellm/adapters/anthropic_adapter.py b/litellm/adapters/anthropic_adapter.py
new file mode 100644
index 0000000000..ce75755ca0
--- /dev/null
+++ b/litellm/adapters/anthropic_adapter.py
@@ -0,0 +1,43 @@
+# What is this?
+## Translates OpenAI call to Anthropic `/v1/messages` format
+import json
+import os
+import traceback
+import uuid
+from typing import Literal, Optional
+
+import dotenv
+import httpx
+
+import litellm
+from litellm import ChatCompletionRequest, verbose_logger
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.types.llms.anthropic import AnthropicMessagesRequest
+
+
+class AnthropicAdapter(CustomLogger):
+    def __init__(self) -> None:
+        super().__init__()
+
+    def translate_completion_input_params(
+        self, kwargs
+    ) -> Optional[ChatCompletionRequest]:
+        """
+        - translate params, where needed
+        - pass rest, as is
+        """
+        request_body = AnthropicMessagesRequest(**kwargs)  # type: ignore
+
+        translated_body = litellm.AnthropicConfig().translate_anthropic_to_openai(
+            anthropic_message_request=request_body
+        )
+        return translated_body
+
+    def translate_completion_output_params(self, response: litellm.ModelResponse):
+        return super().translate_completion_output_params(response)
+
+    def translate_completion_output_params_streaming(self):
+        return super().translate_completion_output_params_streaming()
+
+
+anthropic_adapter = AnthropicAdapter()
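For context, litellm/__init__.py above now exposes an `adapters: List[AdapterItem]` registry. A minimal usage sketch of the new adapter follows; `AdapterItem` is defined in litellm/types/adapter.py, which is not shown in this diff, so the `id`/`adapter` pairing below is an assumption:

    import litellm
    from litellm.adapters.anthropic_adapter import anthropic_adapter
    from litellm.types.adapter import AdapterItem

    # Register the adapter (the id value here is illustrative).
    litellm.adapters = [AdapterItem(id="anthropic", adapter=anthropic_adapter)]

    # An Anthropic /v1/messages-style request body...
    anthropic_request = {
        "model": "gpt-3.5-turbo",
        "max_tokens": 256,
        "messages": [{"role": "user", "content": "Hey, how's it going?"}],
    }

    # ...translated into the litellm.completion() / OpenAI request format.
    openai_request = anthropic_adapter.translate_completion_input_params(
        kwargs=anthropic_request
    )
    print(openai_request)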
+ """ + pass + #### CALL HOOKS - proxy only #### """ Control the modify incoming / outgoung data before calling the model diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index a4521a7031..02e222b905 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -20,17 +20,36 @@ from litellm.llms.custom_httpx.http_handler import ( _get_httpx_client, ) from litellm.types.llms.anthropic import ( + AnthopicMessagesAssistantMessageParam, + AnthropicMessagesRequest, + AnthropicMessagesTool, AnthropicMessagesToolChoice, + AnthropicMessagesUserMessageParam, ContentBlockDelta, ContentBlockStart, MessageBlockDelta, MessageStartBlock, ) from litellm.types.llms.openai import ( + AllMessageValues, + ChatCompletionAssistantMessage, + ChatCompletionAssistantToolCall, + ChatCompletionImageObject, + ChatCompletionImageUrlObject, + ChatCompletionRequest, ChatCompletionResponseMessage, + ChatCompletionSystemMessage, + ChatCompletionTextObject, ChatCompletionToolCallChunk, ChatCompletionToolCallFunctionChunk, + ChatCompletionToolChoiceFunctionParam, + ChatCompletionToolChoiceObjectParam, + ChatCompletionToolChoiceValues, + ChatCompletionToolMessage, + ChatCompletionToolParam, + ChatCompletionToolParamFunctionChunk, ChatCompletionUsageBlock, + ChatCompletionUserMessage, ) from litellm.types.utils import GenericStreamingChunk from litellm.utils import CustomStreamWrapper, ModelResponse, Usage @@ -168,6 +187,210 @@ class AnthropicConfig: optional_params["top_p"] = value return optional_params + def translatable_anthropic_params(self) -> List: + """ + Which anthropic params, we need to translate to the openai format. + """ + return ["messages", "metadata", "system", "tool_choice", "tools"] + + def translate_anthropic_messages_to_openai( + self, + messages: List[ + Union[ + AnthropicMessagesUserMessageParam, + AnthopicMessagesAssistantMessageParam, + ] + ], + ) -> List: + new_messages: List[AllMessageValues] = [] + for m in messages: + user_message: Optional[ChatCompletionUserMessage] = None + tool_message_list: List[ChatCompletionToolMessage] = [] + ## USER MESSAGE ## + if m["role"] == "user": + ## translate user message + if isinstance(m["content"], str): + user_message = ChatCompletionUserMessage( + role="user", content=m["content"] + ) + elif isinstance(m["content"], list): + new_user_content_list: List[ + Union[ChatCompletionTextObject, ChatCompletionImageObject] + ] = [] + for content in m["content"]: + if content["type"] == "text": + text_obj = ChatCompletionTextObject( + type="text", text=content["text"] + ) + new_user_content_list.append(text_obj) + elif content["type"] == "image": + image_url = ChatCompletionImageUrlObject( + url=f"data:{content['type']};base64,{content['source']}" + ) + image_obj = ChatCompletionImageObject( + type="image_url", image_url=image_url + ) + + new_user_content_list.append(image_obj) + elif content["type"] == "tool_result": + if "content" not in content: + tool_result = ChatCompletionToolMessage( + role="tool", + tool_call_id=content["tool_use_id"], + content="", + ) + tool_message_list.append(tool_result) + elif isinstance(content["content"], str): + tool_result = ChatCompletionToolMessage( + role="tool", + tool_call_id=content["tool_use_id"], + content=content["content"], + ) + tool_message_list.append(tool_result) + elif isinstance(content["content"], list): + for c in content["content"]: + if c["type"] == "text": + tool_result = ChatCompletionToolMessage( + role="tool", + tool_call_id=content["tool_use_id"], + content=c["text"], + ) + 
tool_message_list.append(tool_result) + elif c["type"] == "image": + image_str = ( + f"data:{c['type']};base64,{c['source']}" + ) + tool_result = ChatCompletionToolMessage( + role="tool", + tool_call_id=content["tool_use_id"], + content=image_str, + ) + tool_message_list.append(tool_result) + + if user_message is not None: + new_messages.append(user_message) + + if len(tool_message_list) > 0: + new_messages.extend(tool_message_list) + + ## ASSISTANT MESSAGE ## + assistant_message_str: Optional[str] = None + tool_calls: List[ChatCompletionAssistantToolCall] = [] + if m["role"] == "assistant": + if isinstance(m["content"], str): + assistant_message_str = m["content"] + elif isinstance(m["content"], list): + for content in m["content"]: + if content["type"] == "text": + if assistant_message_str is None: + assistant_message_str = content["text"] + else: + assistant_message_str += content["text"] + elif content["type"] == "tool_use": + function_chunk = ChatCompletionToolCallFunctionChunk( + name=content["name"], + arguments=json.dumps(content["input"]), + ) + + tool_calls.append( + ChatCompletionAssistantToolCall( + id=content["id"], + type="function", + function=function_chunk, + ) + ) + + assistant_message = ChatCompletionAssistantMessage( + role="assistant", content=assistant_message_str, tool_calls=tool_calls + ) + new_messages.append(assistant_message) + + return new_messages + + def translate_anthropic_tool_choice_to_openai( + self, tool_choice: AnthropicMessagesToolChoice + ) -> ChatCompletionToolChoiceValues: + if tool_choice["type"] == "any": + return "required" + elif tool_choice["type"] == "auto": + return "auto" + elif tool_choice["type"] == "tool": + tc_function_param = ChatCompletionToolChoiceFunctionParam( + name=tool_choice.get("name", "") + ) + return ChatCompletionToolChoiceObjectParam( + type="function", function=tc_function_param + ) + else: + raise ValueError( + "Incompatible tool choice param submitted - {}".format(tool_choice) + ) + + def translate_anthropic_tools_to_openai( + self, tools: List[AnthropicMessagesTool] + ) -> List[ChatCompletionToolParam]: + new_tools: List[ChatCompletionToolParam] = [] + for tool in tools: + function_chunk = ChatCompletionToolParamFunctionChunk( + name=tool["name"], + parameters=tool["input_schema"], + ) + if "description" in tool: + function_chunk["description"] = tool["description"] + new_tools.append( + ChatCompletionToolParam(type="function", function=function_chunk) + ) + + return new_tools + + def translate_anthropic_to_openai( + self, anthropic_message_request: AnthropicMessagesRequest + ) -> ChatCompletionRequest: + """ + This is used by the beta Anthropic Adapter, for translating anthropic `/v1/messages` requests to the openai format. 
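+        Translates messages, system, metadata (user_id), tool_choice and tools; all remaining params are passed through unchanged.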
+ """ + new_messages: List[AllMessageValues] = [] + + ## CONVERT ANTHROPIC MESSAGES TO OPENAI + new_messages = self.translate_anthropic_messages_to_openai( + messages=anthropic_message_request["messages"] + ) + ## ADD SYSTEM MESSAGE TO MESSAGES + if "system" in anthropic_message_request: + new_messages.insert( + 0, + ChatCompletionSystemMessage( + role="system", content=anthropic_message_request["system"] + ), + ) + + new_kwargs: ChatCompletionRequest = { + "model": anthropic_message_request["model"], + "messages": new_messages, + } + ## CONVERT METADATA (user_id) + if "metadata" in anthropic_message_request: + if "user_id" in anthropic_message_request["metadata"]: + new_kwargs["user"] = anthropic_message_request["metadata"]["user_id"] + + ## CONVERT TOOL CHOICE + if "tool_choice" in anthropic_message_request: + new_kwargs["tool_choice"] = self.translate_anthropic_tool_choice_to_openai( + tool_choice=anthropic_message_request["tool_choice"] + ) + ## CONVERT TOOLS + if "tools" in anthropic_message_request: + new_kwargs["tools"] = self.translate_anthropic_tools_to_openai( + tools=anthropic_message_request["tools"] + ) + + translatable_params = self.translatable_anthropic_params() + for k, v in anthropic_message_request.items(): + if k not in translatable_params: # pass remaining params as is + new_kwargs[k] = v # type: ignore + + return new_kwargs + # makes headers for API call def validate_environment(api_key, user_headers): diff --git a/litellm/main.py b/litellm/main.py index 43e6ad3fc7..bb203ae4a9 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -48,6 +48,7 @@ from litellm import ( # type: ignore get_litellm_params, get_optional_params, ) +from litellm.integrations.custom_logger import CustomLogger from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj from litellm.utils import ( CustomStreamWrapper, @@ -3943,6 +3944,33 @@ def text_completion( return text_completion_response +###### Adapter Completion ################ + + +def adapter_completion(*, adapter_id: str, **kwargs) -> Any: + translation_obj: Optional[CustomLogger] = None + for item in litellm.adapters: + if item["id"] == adapter_id: + translation_obj = item["adapter"] + + if translation_obj is None: + raise ValueError( + "No matching adapter given. 
Received 'adapter_id'={}, litellm.adapters={}".format( + adapter_id, litellm.adapters + ) + ) + + new_kwargs = translation_obj.translate_completion_input_params(kwargs=kwargs) + + response: ModelResponse = completion(**new_kwargs) # type: ignore + + translated_response = translation_obj.translate_completion_output_params( + response=response + ) + + return translated_response + + ##### Moderation ####################### diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 8f8b7fda03..49db7c3787 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -2,17 +2,6 @@ model_list: - model_name: "*" litellm_params: model: "openai/*" - - model_name: gemini-1.5-flash - litellm_params: - model: gemini/gemini-1.5-flash - - model_name: whisper - litellm_params: - model: azure/azure-whisper - api_version: 2024-02-15-preview - api_base: os.environ/AZURE_EUROPE_API_BASE - api_key: os.environ/AZURE_EUROPE_API_KEY - model_info: - mode: audio_transcription diff --git a/litellm/tests/test_anthropic_completion.py b/litellm/tests/test_anthropic_completion.py index 674d090762..25d5823c32 100644 --- a/litellm/tests/test_anthropic_completion.py +++ b/litellm/tests/test_anthropic_completion.py @@ -1,4203 +1,35 @@ # What is this? ## Unit tests for Anthropic Adapter -# import asyncio -# import os -# import sys -# import traceback +import asyncio +import os +import sys +import traceback -# from dotenv import load_dotenv +from dotenv import load_dotenv -# load_dotenv() -# import io -# import os +load_dotenv() +import io +import os -# sys.path.insert( -# 0, os.path.abspath("../..") -# ) # Adds the parent directory to the system path -# from unittest.mock import MagicMock, patch +sys.path.insert( + 0, os.path.abspath("../..") +) # Adds the parent directory to the system path +from unittest.mock import MagicMock, patch -# import pytest +import pytest -# import litellm -# from litellm import ( -# RateLimitError, -# TextCompletionResponse, -# atext_completion, -# completion, -# completion_cost, -# embedding, -# text_completion, -# ) +import litellm +from litellm import adapter_completion +from litellm.adapters.anthropic_adapter import anthropic_adapter -# litellm.num_retries = 3 +def test_anthropic_completion(): + litellm.adapters = [{"id": "anthropic", "adapter": anthropic_adapter}] -# token_prompt = [ -# [ -# 32, -# 2043, -# 32, -# 329, -# 4585, -# 262, -# 1644, -# 14, -# 34, -# 3705, -# 319, -# 616, -# 47551, -# 30, -# 930, -# 19219, -# 284, -# 1949, -# 284, -# 787, -# 428, -# 355, -# 1790, -# 355, -# 1744, -# 981, -# 1390, -# 3307, -# 2622, -# 13, -# 220, -# 198, -# 198, -# 40, -# 423, -# 587, -# 351, -# 616, -# 41668, -# 32682, -# 329, -# 718, -# 812, -# 13, -# 376, -# 666, -# 32682, -# 468, -# 281, -# 4697, -# 6621, -# 11, -# 356, -# 1183, -# 869, -# 607, -# 25737, -# 11, -# 508, -# 318, -# 2579, -# 290, -# 468, -# 257, -# 642, -# 614, -# 1468, -# 1200, -# 13, -# 314, -# 373, -# 612, -# 262, -# 1110, -# 25737, -# 373, -# 287, -# 4827, -# 290, -# 14801, -# 373, -# 4642, -# 11, -# 673, -# 318, -# 616, -# 41803, -# 13, -# 2399, -# 2104, -# 1641, -# 468, -# 6412, -# 284, -# 502, -# 355, -# 465, -# 38074, -# 494, -# 1201, -# 1110, -# 352, -# 13, -# 314, -# 716, -# 407, -# 2910, -# 475, -# 356, -# 389, -# 1641, -# 11, -# 673, -# 3848, -# 502, -# 38074, -# 494, -# 290, -# 356, -# 423, -# 3993, -# 13801, -# 11, -# 26626, -# 11864, -# 11, -# 3503, -# 13, -# 220, -# 198, -# 198, -# 17, -# 812, -# 2084, -# 25737, -# 373, -# 287, -# 14321, -# 
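+    # adapter_completion() looks this registration up and matches on "id"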
422, -# 2563, -# 13230, -# 13, -# 21051, -# 11, -# 2356, -# 25542, -# 11, -# 290, -# 47482, -# 897, -# 547, -# 607, -# 1517, -# 13, -# 1375, -# 550, -# 257, -# 5110, -# 14608, -# 290, -# 262, -# 1641, -# 7723, -# 1637, -# 284, -# 3758, -# 607, -# 284, -# 14321, -# 290, -# 477, -# 8389, -# 257, -# 7269, -# 284, -# 1011, -# 1337, -# 286, -# 14801, -# 13, -# 383, -# 5156, -# 338, -# 9955, -# 11, -# 25737, -# 338, -# 13850, -# 11, -# 468, -# 257, -# 47973, -# 14, -# 9979, -# 2762, -# 1693, -# 290, -# 373, -# 503, -# 286, -# 3240, -# 329, -# 362, -# 1933, -# 523, -# 339, -# 2492, -# 470, -# 612, -# 329, -# 477, -# 286, -# 428, -# 13, -# 220, -# 198, -# 198, -# 3347, -# 10667, -# 5223, -# 503, -# 706, -# 513, -# 1528, -# 11, -# 23630, -# 673, -# 373, -# 366, -# 38125, -# 290, -# 655, -# 2622, -# 257, -# 3338, -# 8399, -# 1911, -# 314, -# 2298, -# 607, -# 510, -# 11, -# 1011, -# 607, -# 284, -# 607, -# 2156, -# 11, -# 290, -# 673, -# 3393, -# 2925, -# 284, -# 7523, -# 20349, -# 290, -# 4144, -# 257, -# 6099, -# 13, -# 314, -# 836, -# 470, -# 892, -# 20349, -# 318, -# 257, -# 2563, -# 290, -# 716, -# 845, -# 386, -# 12, -# 66, -# 1236, -# 571, -# 292, -# 3584, -# 314, -# 836, -# 470, -# 7523, -# 11, -# 475, -# 326, -# 373, -# 407, -# 5035, -# 6402, -# 314, -# 655, -# 6497, -# 607, -# 510, -# 422, -# 14321, -# 13, -# 220, -# 198, -# 198, -# 32, -# 1285, -# 1568, -# 673, -# 373, -# 6294, -# 329, -# 3013, -# 24707, -# 287, -# 262, -# 12436, -# 1539, -# 819, -# 5722, -# 329, -# 852, -# 604, -# 1933, -# 2739, -# 11, -# 39398, -# 607, -# 1097, -# 5059, -# 981, -# 1029, -# 290, -# 318, -# 852, -# 16334, -# 329, -# 720, -# 1120, -# 74, -# 422, -# 15228, -# 278, -# 656, -# 257, -# 2156, -# 11, -# 290, -# 373, -# 12165, -# 503, -# 286, -# 376, -# 666, -# 32682, -# 338, -# 584, -# 6621, -# 338, -# 2156, -# 329, -# 32012, -# 262, -# 14595, -# 373, -# 30601, -# 510, -# 290, -# 2491, -# 357, -# 7091, -# 373, -# 1029, -# 8, -# 290, -# 262, -# 2104, -# 34624, -# 373, -# 46432, -# 1268, -# 1961, -# 422, -# 1660, -# 2465, -# 780, -# 8168, -# 2073, -# 1625, -# 1363, -# 329, -# 807, -# 2250, -# 13, -# 720, -# 1238, -# 11, -# 830, -# 286, -# 2465, -# 290, -# 5875, -# 5770, -# 511, -# 2156, -# 5096, -# 5017, -# 340, -# 13, -# 220, -# 198, -# 198, -# 2504, -# 373, -# 477, -# 938, -# 614, -# 13, -# 1119, -# 1053, -# 587, -# 287, -# 511, -# 649, -# 2156, -# 319, -# 511, -# 898, -# 329, -# 546, -# 718, -# 1933, -# 13, -# 554, -# 3389, -# 673, -# 1444, -# 34020, -# 290, -# 531, -# 511, -# 8744, -# 373, -# 4423, -# 572, -# 780, -# 673, -# 1422, -# 470, -# 423, -# 262, -# 1637, -# 780, -# 41646, -# 338, -# 37751, -# 1392, -# 32621, -# 510, -# 290, -# 1422, -# 470, -# 467, -# 832, -# 13, -# 679, -# 3432, -# 511, -# 2739, -# 8744, -# 9024, -# 492, -# 257, -# 2472, -# 286, -# 720, -# 4059, -# 13, -# 314, -# 1807, -# 340, -# 373, -# 13678, -# 306, -# 5789, -# 475, -# 4030, -# 616, -# 5422, -# 4423, -# 13, -# 1439, -# 468, -# 587, -# 5897, -# 1201, -# 13, -# 220, -# 198, -# 198, -# 7571, -# 2745, -# 2084, -# 11, -# 673, -# 1965, -# 502, -# 284, -# 8804, -# 617, -# 1637, -# 284, -# 651, -# 38464, -# 329, -# 399, -# 8535, -# 13, -# 3226, -# 1781, -# 314, -# 1101, -# 407, -# 1016, -# 284, -# 1309, -# 616, -# 41803, -# 393, -# 6621, -# 467, -# 14720, -# 11, -# 645, -# 2300, -# 644, -# 318, -# 1016, -# 319, -# 4306, -# 11, -# 523, -# 314, -# 910, -# 314, -# 1183, -# 307, -# 625, -# 379, -# 642, -# 13, -# 314, -# 1392, -# 572, -# 670, -# 1903, -# 290, -# 651, -# 612, -# 379, -# 362, -# 25, -# 2231, -# 13, -# 314, -# 1282, -# 287, -# 
1262, -# 616, -# 13952, -# 1994, -# 11, -# 2513, -# 287, -# 11, -# 766, -# 399, -# 8535, -# 2712, -# 351, -# 36062, -# 287, -# 262, -# 5228, -# 11, -# 25737, -# 3804, -# 503, -# 319, -# 262, -# 18507, -# 11, -# 290, -# 16914, -# 319, -# 262, -# 6891, -# 3084, -# 13, -# 8989, -# 2406, -# 422, -# 257, -# 1641, -# 47655, -# 351, -# 13230, -# 11, -# 314, -# 760, -# 644, -# 16914, -# 3073, -# 588, -# 13, -# 314, -# 836, -# 470, -# 760, -# 703, -# 881, -# 340, -# 373, -# 11, -# 475, -# 314, -# 714, -# 423, -# 23529, -# 276, -# 340, -# 510, -# 290, -# 5901, -# 616, -# 18057, -# 351, -# 340, -# 13, -# 314, -# 6810, -# 19772, -# 2024, -# 8347, -# 287, -# 262, -# 2166, -# 2119, -# 290, -# 399, -# 8535, -# 373, -# 287, -# 3294, -# 11685, -# 286, -# 8242, -# 290, -# 607, -# 7374, -# 15224, -# 13, -# 383, -# 4894, -# 373, -# 572, -# 13, -# 383, -# 2156, -# 373, -# 3863, -# 2319, -# 37, -# 532, -# 340, -# 373, -# 1542, -# 2354, -# 13, -# 220, -# 198, -# 198, -# 40, -# 1718, -# 399, -# 8535, -# 284, -# 616, -# 1097, -# 11, -# 290, -# 1444, -# 16679, -# 329, -# 281, -# 22536, -# 355, -# 314, -# 373, -# 12008, -# 25737, -# 373, -# 14904, -# 2752, -# 13, -# 220, -# 314, -# 1422, -# 470, -# 765, -# 284, -# 10436, -# 290, -# 22601, -# 503, -# 399, -# 8535, -# 523, -# 314, -# 9658, -# 287, -# 262, -# 1097, -# 290, -# 1309, -# 607, -# 711, -# 319, -# 616, -# 3072, -# 1566, -# 262, -# 22536, -# 5284, -# 13, -# 3226, -# 1781, -# 1644, -# 290, -# 32084, -# 3751, -# 510, -# 355, -# 880, -# 13, -# 314, -# 4893, -# 262, -# 3074, -# 290, -# 780, -# 399, -# 8535, -# 338, -# 9955, -# 318, -# 503, -# 286, -# 3240, -# 1762, -# 11, -# 34020, -# 14, -# 44, -# 4146, -# 547, -# 1444, -# 13, -# 1649, -# 484, -# 5284, -# 484, -# 547, -# 5897, -# 290, -# 4692, -# 11, -# 1422, -# 470, -# 1107, -# 1561, -# 11, -# 1718, -# 399, -# 8535, -# 11, -# 290, -# 1297, -# 502, -# 284, -# 467, -# 1363, -# 13, -# 220, -# 198, -# 198, -# 2025, -# 1711, -# 1568, -# 314, -# 651, -# 1363, -# 290, -# 41668, -# 32682, -# 7893, -# 502, -# 644, -# 314, -# 1053, -# 1760, -# 13, -# 314, -# 4893, -# 2279, -# 284, -# 683, -# 290, -# 477, -# 339, -# 550, -# 373, -# 8993, -# 329, -# 502, -# 13, -# 18626, -# 262, -# 2104, -# 1641, -# 1541, -# 2993, -# 290, -# 547, -# 28674, -# 379, -# 502, -# 329, -# 644, -# 314, -# 550, -# 1760, -# 13, -# 18626, -# 314, -# 373, -# 366, -# 448, -# 286, -# 1627, -# 290, -# 8531, -# 1, -# 780, -# 314, -# 1444, -# 16679, -# 878, -# 4379, -# 611, -# 673, -# 373, -# 1682, -# 31245, -# 6, -# 278, -# 780, -# 340, -# 2900, -# 503, -# 673, -# 373, -# 655, -# 47583, -# 503, -# 422, -# 262, -# 16914, -# 13, -# 775, -# 8350, -# 329, -# 2250, -# 290, -# 314, -# 1364, -# 290, -# 3377, -# 262, -# 1755, -# 379, -# 616, -# 1266, -# 1545, -# 338, -# 2156, -# 290, -# 16896, -# 477, -# 1755, -# 13, -# 314, -# 3521, -# 470, -# 5412, -# 340, -# 477, -# 523, -# 314, -# 2900, -# 616, -# 3072, -# 572, -# 290, -# 3088, -# 284, -# 8960, -# 290, -# 655, -# 9480, -# 866, -# 13, -# 2011, -# 1266, -# 1545, -# 373, -# 510, -# 477, -# 1755, -# 351, -# 502, -# 11, -# 5149, -# 502, -# 314, -# 750, -# 2147, -# 2642, -# 11, -# 290, -# 314, -# 1101, -# 8788, -# 13, -# 220, -# 198, -# 198, -# 40, -# 1210, -# 616, -# 3072, -# 319, -# 290, -# 314, -# 550, -# 6135, -# 13399, -# 14, -# 37348, -# 1095, -# 13, -# 31515, -# 11, -# 34020, -# 11, -# 47551, -# 11, -# 41668, -# 32682, -# 11, -# 290, -# 511, -# 7083, -# 1641, -# 1866, -# 24630, -# 502, -# 13, -# 1119, -# 389, -# 2282, -# 314, -# 20484, -# 607, -# 1204, -# 11, -# 20484, -# 399, -# 8535, -# 338, -# 1204, -# 
11, -# 925, -# 2279, -# 517, -# 8253, -# 621, -# 340, -# 2622, -# 284, -# 307, -# 11, -# 925, -# 340, -# 1171, -# 618, -# 340, -# 373, -# 257, -# 366, -# 17989, -# 14669, -# 1600, -# 290, -# 20484, -# 25737, -# 338, -# 8395, -# 286, -# 1683, -# 1972, -# 20750, -# 393, -# 1719, -# 10804, -# 286, -# 607, -# 1200, -# 757, -# 11, -# 4844, -# 286, -# 606, -# 1683, -# 765, -# 284, -# 766, -# 502, -# 757, -# 290, -# 314, -# 481, -# 1239, -# 766, -# 616, -# 41803, -# 757, -# 11, -# 290, -# 484, -# 765, -# 502, -# 284, -# 1414, -# 329, -# 25737, -# 338, -# 7356, -# 6314, -# 290, -# 20889, -# 502, -# 329, -# 262, -# 32084, -# 1339, -# 290, -# 7016, -# 12616, -# 13, -# 198, -# 198, -# 40, -# 716, -# 635, -# 783, -# 2060, -# 13, -# 1406, -# 319, -# 1353, -# 286, -# 6078, -# 616, -# 1266, -# 1545, -# 286, -# 838, -# 812, -# 357, -# 69, -# 666, -# 32682, -# 828, -# 314, -# 481, -# 4425, -# 616, -# 7962, -# 314, -# 550, -# 351, -# 683, -# 11, -# 644, -# 314, -# 3177, -# 616, -# 1641, -# 11, -# 290, -# 616, -# 399, -# 8535, -# 13, -# 198, -# 198, -# 40, -# 4988, -# 1254, -# 12361, -# 13, -# 314, -# 423, -# 12361, -# 9751, -# 284, -# 262, -# 966, -# 810, -# 314, -# 1101, -# 7960, -# 2130, -# 318, -# 1016, -# 284, -# 1282, -# 651, -# 366, -# 260, -# 18674, -# 1, -# 319, -# 502, -# 329, -# 644, -# 314, -# 750, -# 13, -# 314, -# 460, -# 470, -# 4483, -# 13, -# 314, -# 423, -# 2626, -# 767, -# 8059, -# 422, -# 340, -# 13, -# 314, -# 1101, -# 407, -# 11029, -# 329, -# 7510, -# 13, -# 314, -# 423, -# 11668, -# 739, -# 616, -# 2951, -# 13, -# 314, -# 1053, -# 550, -# 807, -# 50082, -# 12, -# 12545, -# 287, -# 734, -# 2745, -# 13, -# 1629, -# 717, -# 314, -# 2936, -# 523, -# 6563, -# 287, -# 616, -# 2551, -# 475, -# 355, -# 262, -# 1528, -# 467, -# 416, -# 314, -# 1101, -# 3612, -# 3863, -# 484, -# 547, -# 826, -# 290, -# 314, -# 815, -# 423, -# 10667, -# 319, -# 607, -# 878, -# 4585, -# 16679, -# 290, -# 852, -# 5306, -# 3019, -# 992, -# 13, -# 314, -# 836, -# 470, -# 1337, -# 546, -# 25737, -# 7471, -# 11, -# 475, -# 314, -# 750, -# 18344, -# 257, -# 642, -# 614, -# 1468, -# 1200, -# 1497, -# 422, -# 607, -# 3397, -# 290, -# 314, -# 1254, -# 12361, -# 546, -# 340, -# 13, -# 314, -# 760, -# 2130, -# 287, -# 262, -# 1641, -# 481, -# 1011, -# 607, -# 287, -# 11, -# 475, -# 340, -# 338, -# 1239, -# 588, -# 852, -# 351, -# 534, -# 3397, -# 13, -# 1375, -# 481, -# 1663, -# 510, -# 20315, -# 278, -# 502, -# 329, -# 340, -# 290, -# 477, -# 314, -# 1053, -# 1683, -# 1760, -# 318, -# 1842, -# 607, -# 355, -# 616, -# 898, -# 13, -# 220, -# 198, -# 198, -# 22367, -# 11, -# 317, -# 2043, -# 32, -# 30, -# 4222, -# 1037, -# 502, -# 13, -# 383, -# 14934, -# 318, -# 6600, -# 502, -# 6776, -# 13, -# 220, -# 198, -# 24361, -# 25, -# 1148, -# 428, -# 2642, -# 30, -# 198, -# 33706, -# 25, -# 645, -# ], -# [ -# 32, -# 2043, -# 32, -# 329, -# 4585, -# 262, -# 1644, -# 14, -# 34, -# 3705, -# 319, -# 616, -# 47551, -# 30, -# 930, -# 19219, -# 284, -# 1949, -# 284, -# 787, -# 428, -# 355, -# 1790, -# 355, -# 1744, -# 981, -# 1390, -# 3307, -# 2622, -# 13, -# 220, -# 198, -# 198, -# 40, -# 423, -# 587, -# 351, -# 616, -# 41668, -# 32682, -# 329, -# 718, -# 812, -# 13, -# 376, -# 666, -# 32682, -# 468, -# 281, -# 4697, -# 6621, -# 11, -# 356, -# 1183, -# 869, -# 607, -# 25737, -# 11, -# 508, -# 318, -# 2579, -# 290, -# 468, -# 257, -# 642, -# 614, -# 1468, -# 1200, -# 13, -# 314, -# 373, -# 612, -# 262, -# 1110, -# 25737, -# 373, -# 287, -# 4827, -# 290, -# 14801, -# 373, -# 4642, -# 11, -# 673, -# 318, -# 616, -# 41803, -# 13, -# 2399, -# 
2104, -# 1641, -# 468, -# 6412, -# 284, -# 502, -# 355, -# 465, -# 38074, -# 494, -# 1201, -# 1110, -# 352, -# 13, -# 314, -# 716, -# 407, -# 2910, -# 475, -# 356, -# 389, -# 1641, -# 11, -# 673, -# 3848, -# 502, -# 38074, -# 494, -# 290, -# 356, -# 423, -# 3993, -# 13801, -# 11, -# 26626, -# 11864, -# 11, -# 3503, -# 13, -# 220, -# 198, -# 198, -# 17, -# 812, -# 2084, -# 25737, -# 373, -# 287, -# 14321, -# 422, -# 2563, -# 13230, -# 13, -# 21051, -# 11, -# 2356, -# 25542, -# 11, -# 290, -# 47482, -# 897, -# 547, -# 607, -# 1517, -# 13, -# 1375, -# 550, -# 257, -# 5110, -# 14608, -# 290, -# 262, -# 1641, -# 7723, -# 1637, -# 284, -# 3758, -# 607, -# 284, -# 14321, -# 290, -# 477, -# 8389, -# 257, -# 7269, -# 284, -# 1011, -# 1337, -# 286, -# 14801, -# 13, -# 383, -# 5156, -# 338, -# 9955, -# 11, -# 25737, -# 338, -# 13850, -# 11, -# 468, -# 257, -# 47973, -# 14, -# 9979, -# 2762, -# 1693, -# 290, -# 373, -# 503, -# 286, -# 3240, -# 329, -# 362, -# 1933, -# 523, -# 339, -# 2492, -# 470, -# 612, -# 329, -# 477, -# 286, -# 428, -# 13, -# 220, -# 198, -# 198, -# 3347, -# 10667, -# 5223, -# 503, -# 706, -# 513, -# 1528, -# 11, -# 23630, -# 673, -# 373, -# 366, -# 38125, -# 290, -# 655, -# 2622, -# 257, -# 3338, -# 8399, -# 1911, -# 314, -# 2298, -# 607, -# 510, -# 11, -# 1011, -# 607, -# 284, -# 607, -# 2156, -# 11, -# 290, -# 673, -# 3393, -# 2925, -# 284, -# 7523, -# 20349, -# 290, -# 4144, -# 257, -# 6099, -# 13, -# 314, -# 836, -# 470, -# 892, -# 20349, -# 318, -# 257, -# 2563, -# 290, -# 716, -# 845, -# 386, -# 12, -# 66, -# 1236, -# 571, -# 292, -# 3584, -# 314, -# 836, -# 470, -# 7523, -# 11, -# 475, -# 326, -# 373, -# 407, -# 5035, -# 6402, -# 314, -# 655, -# 6497, -# 607, -# 510, -# 422, -# 14321, -# 13, -# 220, -# 198, -# 198, -# 32, -# 1285, -# 1568, -# 673, -# 373, -# 6294, -# 329, -# 3013, -# 24707, -# 287, -# 262, -# 12436, -# 1539, -# 819, -# 5722, -# 329, -# 852, -# 604, -# 1933, -# 2739, -# 11, -# 39398, -# 607, -# 1097, -# 5059, -# 981, -# 1029, -# 290, -# 318, -# 852, -# 16334, -# 329, -# 720, -# 1120, -# 74, -# 422, -# 15228, -# 278, -# 656, -# 257, -# 2156, -# 11, -# 290, -# 373, -# 12165, -# 503, -# 286, -# 376, -# 666, -# 32682, -# 338, -# 584, -# 6621, -# 338, -# 2156, -# 329, -# 32012, -# 262, -# 14595, -# 373, -# 30601, -# 510, -# 290, -# 2491, -# 357, -# 7091, -# 373, -# 1029, -# 8, -# 290, -# 262, -# 2104, -# 34624, -# 373, -# 46432, -# 1268, -# 1961, -# 422, -# 1660, -# 2465, -# 780, -# 8168, -# 2073, -# 1625, -# 1363, -# 329, -# 807, -# 2250, -# 13, -# 720, -# 1238, -# 11, -# 830, -# 286, -# 2465, -# 290, -# 5875, -# 5770, -# 511, -# 2156, -# 5096, -# 5017, -# 340, -# 13, -# 220, -# 198, -# 198, -# 2504, -# 373, -# 477, -# 938, -# 614, -# 13, -# 1119, -# 1053, -# 587, -# 287, -# 511, -# 649, -# 2156, -# 319, -# 511, -# 898, -# 329, -# 546, -# 718, -# 1933, -# 13, -# 554, -# 3389, -# 673, -# 1444, -# 34020, -# 290, -# 531, -# 511, -# 8744, -# 373, -# 4423, -# 572, -# 780, -# 673, -# 1422, -# 470, -# 423, -# 262, -# 1637, -# 780, -# 41646, -# 338, -# 37751, -# 1392, -# 32621, -# 510, -# 290, -# 1422, -# 470, -# 467, -# 832, -# 13, -# 679, -# 3432, -# 511, -# 2739, -# 8744, -# 9024, -# 492, -# 257, -# 2472, -# 286, -# 720, -# 4059, -# 13, -# 314, -# 1807, -# 340, -# 373, -# 13678, -# 306, -# 5789, -# 475, -# 4030, -# 616, -# 5422, -# 4423, -# 13, -# 1439, -# 468, -# 587, -# 5897, -# 1201, -# 13, -# 220, -# 198, -# 198, -# 7571, -# 2745, -# 2084, -# 11, -# 673, -# 1965, -# 502, -# 284, -# 8804, -# 617, -# 1637, -# 284, -# 651, -# 38464, -# 329, -# 399, -# 8535, -# 13, 
-# 3226, -# 1781, -# 314, -# 1101, -# 407, -# 1016, -# 284, -# 1309, -# 616, -# 41803, -# 393, -# 6621, -# 467, -# 14720, -# 11, -# 645, -# 2300, -# 644, -# 318, -# 1016, -# 319, -# 4306, -# 11, -# 523, -# 314, -# 910, -# 314, -# 1183, -# 307, -# 625, -# 379, -# 642, -# 13, -# 314, -# 1392, -# 572, -# 670, -# 1903, -# 290, -# 651, -# 612, -# 379, -# 362, -# 25, -# 2231, -# 13, -# 314, -# 1282, -# 287, -# 1262, -# 616, -# 13952, -# 1994, -# 11, -# 2513, -# 287, -# 11, -# 766, -# 399, -# 8535, -# 2712, -# 351, -# 36062, -# 287, -# 262, -# 5228, -# 11, -# 25737, -# 3804, -# 503, -# 319, -# 262, -# 18507, -# 11, -# 290, -# 16914, -# 319, -# 262, -# 6891, -# 3084, -# 13, -# 8989, -# 2406, -# 422, -# 257, -# 1641, -# 47655, -# 351, -# 13230, -# 11, -# 314, -# 760, -# 644, -# 16914, -# 3073, -# 588, -# 13, -# 314, -# 836, -# 470, -# 760, -# 703, -# 881, -# 340, -# 373, -# 11, -# 475, -# 314, -# 714, -# 423, -# 23529, -# 276, -# 340, -# 510, -# 290, -# 5901, -# 616, -# 18057, -# 351, -# 340, -# 13, -# 314, -# 6810, -# 19772, -# 2024, -# 8347, -# 287, -# 262, -# 2166, -# 2119, -# 290, -# 399, -# 8535, -# 373, -# 287, -# 3294, -# 11685, -# 286, -# 8242, -# 290, -# 607, -# 7374, -# 15224, -# 13, -# 383, -# 4894, -# 373, -# 572, -# 13, -# 383, -# 2156, -# 373, -# 3863, -# 2319, -# 37, -# 532, -# 340, -# 373, -# 1542, -# 2354, -# 13, -# 220, -# 198, -# 198, -# 40, -# 1718, -# 399, -# 8535, -# 284, -# 616, -# 1097, -# 11, -# 290, -# 1444, -# 16679, -# 329, -# 281, -# 22536, -# 355, -# 314, -# 373, -# 12008, -# 25737, -# 373, -# 14904, -# 2752, -# 13, -# 220, -# 314, -# 1422, -# 470, -# 765, -# 284, -# 10436, -# 290, -# 22601, -# 503, -# 399, -# 8535, -# 523, -# 314, -# 9658, -# 287, -# 262, -# 1097, -# 290, -# 1309, -# 607, -# 711, -# 319, -# 616, -# 3072, -# 1566, -# 262, -# 22536, -# 5284, -# 13, -# 3226, -# 1781, -# 1644, -# 290, -# 32084, -# 3751, -# 510, -# 355, -# 880, -# 13, -# 314, -# 4893, -# 262, -# 3074, -# 290, -# 780, -# 399, -# 8535, -# 338, -# 9955, -# 318, -# 503, -# 286, -# 3240, -# 1762, -# 11, -# 34020, -# 14, -# 44, -# 4146, -# 547, -# 1444, -# 13, -# 1649, -# 484, -# 5284, -# 484, -# 547, -# 5897, -# 290, -# 4692, -# 11, -# 1422, -# 470, -# 1107, -# 1561, -# 11, -# 1718, -# 399, -# 8535, -# 11, -# 290, -# 1297, -# 502, -# 284, -# 467, -# 1363, -# 13, -# 220, -# 198, -# 198, -# 2025, -# 1711, -# 1568, -# 314, -# 651, -# 1363, -# 290, -# 41668, -# 32682, -# 7893, -# 502, -# 644, -# 314, -# 1053, -# 1760, -# 13, -# 314, -# 4893, -# 2279, -# 284, -# 683, -# 290, -# 477, -# 339, -# 550, -# 373, -# 8993, -# 329, -# 502, -# 13, -# 18626, -# 262, -# 2104, -# 1641, -# 1541, -# 2993, -# 290, -# 547, -# 28674, -# 379, -# 502, -# 329, -# 644, -# 314, -# 550, -# 1760, -# 13, -# 18626, -# 314, -# 373, -# 366, -# 448, -# 286, -# 1627, -# 290, -# 8531, -# 1, -# 780, -# 314, -# 1444, -# 16679, -# 878, -# 4379, -# 611, -# 673, -# 373, -# 1682, -# 31245, -# 6, -# 278, -# 780, -# 340, -# 2900, -# 503, -# 673, -# 373, -# 655, -# 47583, -# 503, -# 422, -# 262, -# 16914, -# 13, -# 775, -# 8350, -# 329, -# 2250, -# 290, -# 314, -# 1364, -# 290, -# 3377, -# 262, -# 1755, -# 379, -# 616, -# 1266, -# 1545, -# 338, -# 2156, -# 290, -# 16896, -# 477, -# 1755, -# 13, -# 314, -# 3521, -# 470, -# 5412, -# 340, -# 477, -# 523, -# 314, -# 2900, -# 616, -# 3072, -# 572, -# 290, -# 3088, -# 284, -# 8960, -# 290, -# 655, -# 9480, -# 866, -# 13, -# 2011, -# 1266, -# 1545, -# 373, -# 510, -# 477, -# 1755, -# 351, -# 502, -# 11, -# 5149, -# 502, -# 314, -# 750, -# 2147, -# 2642, -# 11, -# 290, -# 314, -# 1101, -# 8788, -# 
13, -# 220, -# 198, -# 198, -# 40, -# 1210, -# 616, -# 3072, -# 319, -# 290, -# 314, -# 550, -# 6135, -# 13399, -# 14, -# 37348, -# 1095, -# 13, -# 31515, -# 11, -# 34020, -# 11, -# 47551, -# 11, -# 41668, -# 32682, -# 11, -# 290, -# 511, -# 7083, -# 1641, -# 1866, -# 24630, -# 502, -# 13, -# 1119, -# 389, -# 2282, -# 314, -# 20484, -# 607, -# 1204, -# 11, -# 20484, -# 399, -# 8535, -# 338, -# 1204, -# 11, -# 925, -# 2279, -# 517, -# 8253, -# 621, -# 340, -# 2622, -# 284, -# 307, -# 11, -# 925, -# 340, -# 1171, -# 618, -# 340, -# 373, -# 257, -# 366, -# 17989, -# 14669, -# 1600, -# 290, -# 20484, -# 25737, -# 338, -# 8395, -# 286, -# 1683, -# 1972, -# 20750, -# 393, -# 1719, -# 10804, -# 286, -# 607, -# 1200, -# 757, -# 11, -# 4844, -# 286, -# 606, -# 1683, -# 765, -# 284, -# 766, -# 502, -# 757, -# 290, -# 314, -# 481, -# 1239, -# 766, -# 616, -# 41803, -# 757, -# 11, -# 290, -# 484, -# 765, -# 502, -# 284, -# 1414, -# 329, -# 25737, -# 338, -# 7356, -# 6314, -# 290, -# 20889, -# 502, -# 329, -# 262, -# 32084, -# 1339, -# 290, -# 7016, -# 12616, -# 13, -# 198, -# 198, -# 40, -# 716, -# 635, -# 783, -# 2060, -# 13, -# 1406, -# 319, -# 1353, -# 286, -# 6078, -# 616, -# 1266, -# 1545, -# 286, -# 838, -# 812, -# 357, -# 69, -# 666, -# 32682, -# 828, -# 314, -# 481, -# 4425, -# 616, -# 7962, -# 314, -# 550, -# 351, -# 683, -# 11, -# 644, -# 314, -# 3177, -# 616, -# 1641, -# 11, -# 290, -# 616, -# 399, -# 8535, -# 13, -# 198, -# 198, -# 40, -# 4988, -# 1254, -# 12361, -# 13, -# 314, -# 423, -# 12361, -# 9751, -# 284, -# 262, -# 966, -# 810, -# 314, -# 1101, -# 7960, -# 2130, -# 318, -# 1016, -# 284, -# 1282, -# 651, -# 366, -# 260, -# 18674, -# 1, -# 319, -# 502, -# 329, -# 644, -# 314, -# 750, -# 13, -# 314, -# 460, -# 470, -# 4483, -# 13, -# 314, -# 423, -# 2626, -# 767, -# 8059, -# 422, -# 340, -# 13, -# 314, -# 1101, -# 407, -# 11029, -# 329, -# 7510, -# 13, -# 314, -# 423, -# 11668, -# 739, -# 616, -# 2951, -# 13, -# 314, -# 1053, -# 550, -# 807, -# 50082, -# 12, -# 12545, -# 287, -# 734, -# 2745, -# 13, -# 1629, -# 717, -# 314, -# 2936, -# 523, -# 6563, -# 287, -# 616, -# 2551, -# 475, -# 355, -# 262, -# 1528, -# 467, -# 416, -# 314, -# 1101, -# 3612, -# 3863, -# 484, -# 547, -# 826, -# 290, -# 314, -# 815, -# 423, -# 10667, -# 319, -# 607, -# 878, -# 4585, -# 16679, -# 290, -# 852, -# 5306, -# 3019, -# 992, -# 13, -# 314, -# 836, -# 470, -# 1337, -# 546, -# 25737, -# 7471, -# 11, -# 475, -# 314, -# 750, -# 18344, -# 257, -# 642, -# 614, -# 1468, -# 1200, -# 1497, -# 422, -# 607, -# 3397, -# 290, -# 314, -# 1254, -# 12361, -# 546, -# 340, -# 13, -# 314, -# 760, -# 2130, -# 287, -# 262, -# 1641, -# 481, -# 1011, -# 607, -# 287, -# 11, -# 475, -# 340, -# 338, -# 1239, -# 588, -# 852, -# 351, -# 534, -# 3397, -# 13, -# 1375, -# 481, -# 1663, -# 510, -# 20315, -# 278, -# 502, -# 329, -# 340, -# 290, -# 477, -# 314, -# 1053, -# 1683, -# 1760, -# 318, -# 1842, -# 607, -# 355, -# 616, -# 898, -# 13, -# 220, -# 198, -# 198, -# 22367, -# 11, -# 317, -# 2043, -# 32, -# 30, -# 4222, -# 1037, -# 502, -# 13, -# 383, -# 14934, -# 318, -# 6600, -# 502, -# 6776, -# 13, -# 220, -# 198, -# 24361, -# 25, -# 1148, -# 428, -# 2642, -# 30, -# 198, -# 33706, -# 25, -# 3763, -# ], -# ] + messages = [{"role": "user", "content": "Hey, how's it going?"}] + response = adapter_completion( + model="gpt-3.5-turbo", messages=messages, adapter_id="anthropic" + ) - -# def test_unit_test_text_completion_object(): -# openai_object = { -# "id": "cmpl-99y7B2svVoRWe1xd7UFRmeGjZrFSh", -# "choices": [ -# { -# "finish_reason": 
"length", -# "index": 0, -# "logprobs": { -# "text_offset": [101], -# "token_logprobs": [-0.00023488728], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00023488728, -# "1": -8.375235, -# "zero": -14.101797, -# "__": -14.554922, -# "00": -14.98461, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 1, -# "logprobs": { -# "text_offset": [116], -# "token_logprobs": [-0.013745008], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.013745008, -# "1": -4.294995, -# "00": -12.287183, -# "2": -12.771558, -# "3": -14.013745, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 2, -# "logprobs": { -# "text_offset": [108], -# "token_logprobs": [-3.655073e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -3.655073e-5, -# "1": -10.656286, -# "__": -11.789099, -# "false": -12.984411, -# "00": -14.039099, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 3, -# "logprobs": { -# "text_offset": [106], -# "token_logprobs": [-0.1345946], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.1345946, -# "1": -2.0720947, -# "2": -12.798657, -# "false": -13.970532, -# "00": -14.27522, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 4, -# "logprobs": { -# "text_offset": [95], -# "token_logprobs": [-0.10491652], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.10491652, -# "1": -2.3236666, -# "2": -7.0111666, -# "3": -7.987729, -# "4": -9.050229, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 5, -# "logprobs": { -# "text_offset": [121], -# "token_logprobs": [-0.00026300468], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00026300468, -# "1": -8.250263, -# "zero": -14.976826, -# " ": -15.461201, -# "000": -15.773701, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 6, -# "logprobs": { -# "text_offset": [146], -# "token_logprobs": [-5.085517e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -5.085517e-5, -# "1": -9.937551, -# "000": -13.929738, -# "__": -14.968801, -# "zero": -15.070363, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 7, -# "logprobs": { -# "text_offset": [100], -# "token_logprobs": [-0.13875218], -# "tokens": ["1"], -# "top_logprobs": [ -# { -# "1": -0.13875218, -# "0": -2.0450022, -# "2": -9.7559395, -# "3": -11.1465645, -# "4": -11.5528145, -# } -# ], -# }, -# "text": "1", -# }, -# { -# "finish_reason": "length", -# "index": 8, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0005573204], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0005573204, -# "1": -7.6099324, -# "3": -10.070869, -# "2": -11.617744, -# " ": -12.859932, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 9, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0018747397], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0018747397, -# "1": -6.29875, -# "3": -11.2675, -# "4": -11.634687, -# "2": -11.822187, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 10, -# "logprobs": { -# "text_offset": [110], -# "token_logprobs": [-0.003476763], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.003476763, -# "1": -5.6909766, -# "__": -10.526915, -# "None": -10.925352, -# "False": -11.88629, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 11, -# "logprobs": { -# 
"text_offset": [106], -# "token_logprobs": [-0.00032962486], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00032962486, -# "1": -8.03158, -# "__": -13.445642, -# "2": -13.828455, -# "zero": -15.453455, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 12, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-9.984788e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -9.984788e-5, -# "1": -9.21885, -# " ": -14.836038, -# "zero": -16.265724, -# "00": -16.578224, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 13, -# "logprobs": { -# "text_offset": [106], -# "token_logprobs": [-0.0010039895], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0010039895, -# "1": -6.907254, -# "2": -13.743192, -# "false": -15.227567, -# "3": -15.297879, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 14, -# "logprobs": { -# "text_offset": [106], -# "token_logprobs": [-0.0005681643], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0005681643, -# "1": -7.5005684, -# "__": -11.836506, -# "zero": -13.242756, -# "file": -13.445881, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 15, -# "logprobs": { -# "text_offset": [146], -# "token_logprobs": [-3.9769227e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -3.9769227e-5, -# "1": -10.15629, -# "000": -15.078165, -# "00": -15.664103, -# "zero": -16.015665, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 16, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0006509595], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0006509595, -# "1": -7.344401, -# "2": -13.352214, -# " ": -13.852214, -# "3": -14.680339, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 17, -# "logprobs": { -# "text_offset": [103], -# "token_logprobs": [-0.0093299495], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0093299495, -# "1": -4.681205, -# "2": -11.173392, -# "3": -13.439017, -# "00": -14.673392, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 18, -# "logprobs": { -# "text_offset": [130], -# "token_logprobs": [-0.00024382756], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00024382756, -# "1": -8.328369, -# " ": -13.640869, -# "zero": -14.859619, -# "null": -16.51587, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 19, -# "logprobs": { -# "text_offset": [107], -# "token_logprobs": [-0.0006452414], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0006452414, -# "1": -7.36002, -# "00": -12.328771, -# "000": -12.961583, -# "2": -14.211583, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 20, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0012751155], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0012751155, -# "1": -6.67315, -# "__": -11.970025, -# "<|endoftext|>": -14.907525, -# "3": -14.930963, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 21, -# "logprobs": { -# "text_offset": [107], -# "token_logprobs": [-7.1954215e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -7.1954215e-5, -# "1": -9.640697, -# "00": -13.500072, -# "000": -13.523509, -# "__": -13.945384, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 22, -# "logprobs": { -# 
"text_offset": [108], -# "token_logprobs": [-0.0032367748], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0032367748, -# "1": -5.737612, -# "<|endoftext|>": -13.940737, -# "2": -14.167299, -# "00": -14.292299, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 23, -# "logprobs": { -# "text_offset": [117], -# "token_logprobs": [-0.00018673266], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00018673266, -# "1": -8.593937, -# "zero": -15.179874, -# "null": -15.515812, -# "None": -15.851749, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 24, -# "logprobs": { -# "text_offset": [104], -# "token_logprobs": [-0.0010223285], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0010223285, -# "1": -6.8916473, -# "__": -13.05571, -# "00": -14.071335, -# "zero": -14.235397, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 25, -# "logprobs": { -# "text_offset": [108], -# "token_logprobs": [-0.0038979414], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0038979414, -# "1": -5.550773, -# "2": -13.160148, -# "00": -14.144523, -# "3": -14.41796, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 26, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.00074721366], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00074721366, -# "1": -7.219497, -# "3": -11.430435, -# "2": -13.367935, -# " ": -13.735123, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 27, -# "logprobs": { -# "text_offset": [146], -# "token_logprobs": [-8.566264e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -8.566264e-5, -# "1": -9.375086, -# "000": -15.359461, -# "__": -15.671961, -# "00": -15.679773, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 28, -# "logprobs": { -# "text_offset": [119], -# "token_logprobs": [-0.000274683], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.000274683, -# "1": -8.2034, -# "00": -14.898712, -# "2": -15.633087, -# "__": -16.844025, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 29, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.014869375], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.014869375, -# "1": -4.217994, -# "2": -11.63987, -# "3": -11.944557, -# "5": -12.26487, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 30, -# "logprobs": { -# "text_offset": [110], -# "token_logprobs": [-0.010907865], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.010907865, -# "1": -4.5265326, -# "2": -11.440596, -# "<|endoftext|>": -12.456221, -# "file": -13.049971, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 31, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.00070528337], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00070528337, -# "1": -7.2663302, -# "6": -13.141331, -# "2": -13.797581, -# "3": -13.836643, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 32, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0004983439], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0004983439, -# "1": -7.6098733, -# "3": -14.211436, -# "2": -14.336436, -# " ": -15.117686, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 33, -# "logprobs": { -# 
"text_offset": [110], -# "token_logprobs": [-3.6908343e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -3.6908343e-5, -# "1": -10.250037, -# "00": -14.2266, -# "__": -14.7266, -# "000": -16.164099, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 34, -# "logprobs": { -# "text_offset": [104], -# "token_logprobs": [-0.003917157], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.003917157, -# "1": -5.550792, -# "2": -11.355479, -# "00": -12.777354, -# "3": -13.652354, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 35, -# "logprobs": { -# "text_offset": [146], -# "token_logprobs": [-5.0139948e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -5.0139948e-5, -# "1": -9.921926, -# "000": -14.851613, -# "00": -15.414113, -# "zero": -15.687551, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 36, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0005143099], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0005143099, -# "1": -7.5786395, -# " ": -14.406764, -# "00": -14.570827, -# "999": -14.633327, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 37, -# "logprobs": { -# "text_offset": [103], -# "token_logprobs": [-0.00013691289], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00013691289, -# "1": -8.968887, -# "__": -12.547012, -# "zero": -13.57045, -# "00": -13.8517, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 38, -# "logprobs": { -# "text_offset": [103], -# "token_logprobs": [-0.00032569113], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00032569113, -# "1": -8.047201, -# "2": -13.570639, -# "zero": -14.023764, -# "false": -14.726889, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 39, -# "logprobs": { -# "text_offset": [113], -# "token_logprobs": [-3.7146747e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -3.7146747e-5, -# "1": -10.203162, -# "zero": -18.437536, -# "2": -20.117224, -# " zero": -20.210974, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 40, -# "logprobs": { -# "text_offset": [110], -# "token_logprobs": [-7.4695905e-5], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -7.4695905e-5, -# "1": -9.515699, -# "00": -14.836012, -# "__": -16.093824, -# "file": -16.468824, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 41, -# "logprobs": { -# "text_offset": [111], -# "token_logprobs": [-0.02289473], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.02289473, -# "1": -3.7885196, -# "2": -12.499457, -# "3": -14.546332, -# "00": -15.66352, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 42, -# "logprobs": { -# "text_offset": [108], -# "token_logprobs": [-0.0011367622], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0011367622, -# "1": -6.782387, -# "2": -13.493324, -# "00": -15.071449, -# "zero": -15.727699, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 43, -# "logprobs": { -# "text_offset": [115], -# "token_logprobs": [-0.0006384541], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0006384541, -# "1": -7.3600135, -# "00": -14.0397005, -# "2": -14.4303255, -# "000": -15.563138, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 44, -# "logprobs": { -# 
"text_offset": [143], -# "token_logprobs": [-0.0007382771], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0007382771, -# "1": -7.219488, -# "4": -13.516363, -# "2": -13.555426, -# "3": -13.602301, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 45, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0014242834], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0014242834, -# "1": -6.5639243, -# "2": -12.493611, -# "__": -12.712361, -# "3": -12.884236, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 46, -# "logprobs": { -# "text_offset": [111], -# "token_logprobs": [-0.00017088225], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00017088225, -# "1": -8.765796, -# "zero": -12.695483, -# "__": -12.804858, -# "time": -12.882983, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 47, -# "logprobs": { -# "text_offset": [146], -# "token_logprobs": [-0.000107238506], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.000107238506, -# "1": -9.171982, -# "000": -13.648544, -# "__": -14.531357, -# "zero": -14.586044, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 48, -# "logprobs": { -# "text_offset": [106], -# "token_logprobs": [-0.0028172398], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0028172398, -# "1": -5.877817, -# "00": -12.16688, -# "2": -12.487192, -# "000": -14.182505, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 49, -# "logprobs": { -# "text_offset": [104], -# "token_logprobs": [-0.00043460296], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.00043460296, -# "1": -7.7816844, -# "00": -13.570747, -# "2": -13.60981, -# "__": -13.789497, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 50, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0046973573], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0046973573, -# "1": -5.3640723, -# "null": -14.082823, -# " ": -14.707823, -# "2": -14.746885, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 51, -# "logprobs": { -# "text_offset": [100], -# "token_logprobs": [-0.2487161], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.2487161, -# "1": -1.5143411, -# "2": -9.037779, -# "3": -10.100279, -# "4": -10.756529, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 52, -# "logprobs": { -# "text_offset": [108], -# "token_logprobs": [-0.0011751055], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0011751055, -# "1": -6.751175, -# " ": -13.73555, -# "2": -15.258987, -# "3": -15.399612, -# } -# ], -# }, -# "text": "0", -# }, -# { -# "finish_reason": "length", -# "index": 53, -# "logprobs": { -# "text_offset": [143], -# "token_logprobs": [-0.0012339224], -# "tokens": ["0"], -# "top_logprobs": [ -# { -# "0": -0.0012339224, -# "1": -6.719984, -# "6": -11.430922, -# "3": -12.165297, -# "2": -12.696547, -# } -# ], -# }, -# "text": "0", -# }, -# ], -# "created": 1712163061, -# "model": "ft:babbage-002:ai-r-d-zapai:v3-fields-used:84jb9rtr", -# "object": "text_completion", -# "system_fingerprint": None, -# "usage": {"completion_tokens": 54, "prompt_tokens": 1877, "total_tokens": 1931}, -# } - -# text_completion_obj = TextCompletionResponse(**openai_object) - -# ## WRITE UNIT TESTS FOR TEXT_COMPLETION_OBJECT -# assert text_completion_obj.id == 
"cmpl-99y7B2svVoRWe1xd7UFRmeGjZrFSh" -# assert text_completion_obj.object == "text_completion" -# assert text_completion_obj.created == 1712163061 -# assert ( -# text_completion_obj.model -# == "ft:babbage-002:ai-r-d-zapai:v3-fields-used:84jb9rtr" -# ) -# assert text_completion_obj.system_fingerprint == None -# assert len(text_completion_obj.choices) == len(openai_object["choices"]) - -# # TEST FIRST CHOICE # -# first_text_completion_obj = text_completion_obj.choices[0] -# assert first_text_completion_obj.index == 0 -# assert first_text_completion_obj.logprobs.text_offset == [101] -# assert first_text_completion_obj.logprobs.tokens == ["0"] -# assert first_text_completion_obj.logprobs.token_logprobs == [-0.00023488728] -# assert len(first_text_completion_obj.logprobs.top_logprobs) == len( -# openai_object["choices"][0]["logprobs"]["top_logprobs"] -# ) -# assert first_text_completion_obj.text == "0" -# assert first_text_completion_obj.finish_reason == "length" - -# # TEST SECOND CHOICE # -# second_text_completion_obj = text_completion_obj.choices[1] -# assert second_text_completion_obj.index == 1 -# assert second_text_completion_obj.logprobs.text_offset == [116] -# assert second_text_completion_obj.logprobs.tokens == ["0"] -# assert second_text_completion_obj.logprobs.token_logprobs == [-0.013745008] -# assert len(second_text_completion_obj.logprobs.top_logprobs) == len( -# openai_object["choices"][0]["logprobs"]["top_logprobs"] -# ) -# assert second_text_completion_obj.text == "0" -# assert second_text_completion_obj.finish_reason == "length" - -# # TEST LAST CHOICE # -# last_text_completion_obj = text_completion_obj.choices[-1] -# assert last_text_completion_obj.index == 53 -# assert last_text_completion_obj.logprobs.text_offset == [143] -# assert last_text_completion_obj.logprobs.tokens == ["0"] -# assert last_text_completion_obj.logprobs.token_logprobs == [-0.0012339224] -# assert len(last_text_completion_obj.logprobs.top_logprobs) == len( -# openai_object["choices"][0]["logprobs"]["top_logprobs"] -# ) -# assert last_text_completion_obj.text == "0" -# assert last_text_completion_obj.finish_reason == "length" - -# assert text_completion_obj.usage.completion_tokens == 54 -# assert text_completion_obj.usage.prompt_tokens == 1877 -# assert text_completion_obj.usage.total_tokens == 1931 - - -# def test_completion_openai_prompt(): -# try: -# print("\n text 003 test\n") -# response = text_completion( -# model="gpt-3.5-turbo-instruct", -# prompt=["What's the weather in SF?", "How is Manchester?"], -# ) -# print(response) -# assert len(response.choices) == 2 -# response_str = response["choices"][0]["text"] -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# # test_completion_openai_prompt() - - -# def test_completion_openai_engine_and_model(): -# try: -# print("\n text 003 test\n") -# litellm.set_verbose = True -# response = text_completion( -# model="gpt-3.5-turbo-instruct", -# engine="anything", -# prompt="What's the weather in SF?", -# max_tokens=5, -# ) -# print(response) -# response_str = response["choices"][0]["text"] -# # print(response.choices[0]) -# # print(response.choices[0].text) -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# # test_completion_openai_engine_and_model() - - -# def test_completion_openai_engine(): -# try: -# print("\n text 003 test\n") -# litellm.set_verbose = True -# response = text_completion( -# engine="gpt-3.5-turbo-instruct", -# prompt="What's the weather in SF?", -# max_tokens=5, -# ) -# print(response) -# response_str 
= response["choices"][0]["text"] -# # print(response.choices[0]) -# # print(response.choices[0].text) -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# # test_completion_openai_engine() - - -# def test_completion_chatgpt_prompt(): -# try: -# print("\n gpt3.5 test\n") -# response = text_completion( -# model="gpt-3.5-turbo", prompt="What's the weather in SF?" -# ) -# print(response) -# response_str = response["choices"][0]["text"] -# print("\n", response.choices) -# print("\n", response.choices[0]) -# # print(response.choices[0].text) -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# # test_completion_chatgpt_prompt() - - -# def test_text_completion_basic(): -# try: -# print("\n test 003 with logprobs \n") -# litellm.set_verbose = False -# response = text_completion( -# model="gpt-3.5-turbo-instruct", -# prompt="good morning", -# max_tokens=10, -# logprobs=10, -# ) -# print(response) -# print(response.choices) -# print(response.choices[0]) -# # print(response.choices[0].text) -# response_str = response["choices"][0]["text"] -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# # test_text_completion_basic() - - -# def test_completion_text_003_prompt_array(): -# try: -# litellm.set_verbose = False -# response = text_completion( -# model="gpt-3.5-turbo-instruct", -# prompt=token_prompt, # token prompt is a 2d list -# ) -# print("\n\n response") - -# print(response) -# # response_str = response["choices"][0]["text"] -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# # test_completion_text_003_prompt_array() - - -# # not including this in our ci cd pipeline, since we don't want to fail tests due to an unstable replit -# # def test_text_completion_with_proxy(): -# # try: -# # litellm.set_verbose=True -# # response = text_completion( -# # model="facebook/opt-125m", -# # prompt='Write a tagline for a traditional bavarian tavern', -# # api_base="https://openai-proxy.berriai.repl.co/v1", -# # custom_llm_provider="openai", -# # temperature=0, -# # max_tokens=10, -# # ) -# # print("\n\n response") - -# # print(response) -# # except Exception as e: -# # pytest.fail(f"Error occurred: {e}") -# # test_text_completion_with_proxy() - - -# ##### hugging face tests -# def test_completion_hf_prompt_array(): -# try: -# litellm.set_verbose = True -# print("\n testing hf mistral\n") -# response = text_completion( -# model="huggingface/mistralai/Mistral-7B-v0.1", -# prompt=token_prompt, # token prompt is a 2d list, -# max_tokens=0, -# temperature=0.0, -# # echo=True, # hugging face inference api is currently raising errors for this, looks like they have a regression on their side -# ) -# print("\n\n response") - -# print(response) -# print(response.choices) -# assert len(response.choices) == 2 -# # response_str = response["choices"][0]["text"] -# except Exception as e: -# print(str(e)) -# if "is currently loading" in str(e): -# return -# if "Service Unavailable" in str(e): -# return -# pytest.fail(f"Error occurred: {e}") - - -# # test_completion_hf_prompt_array() - - -# def test_text_completion_stream(): -# try: -# response = text_completion( -# model="huggingface/mistralai/Mistral-7B-v0.1", -# prompt="good morning", -# stream=True, -# max_tokens=10, -# ) -# for chunk in response: -# print(f"chunk: {chunk}") -# except Exception as e: -# pytest.fail(f"GOT exception for HF In streaming{e}") - - -# # test_text_completion_stream() - -# # async def test_text_completion_async_stream(): -# # try: -# # response = await atext_completion( -# # 
model="text-completion-openai/gpt-3.5-turbo-instruct", -# # prompt="good morning", -# # stream=True, -# # max_tokens=10, -# # ) -# # async for chunk in response: -# # print(f"chunk: {chunk}") -# # except Exception as e: -# # pytest.fail(f"GOT exception for HF In streaming{e}") - -# # asyncio.run(test_text_completion_async_stream()) - - -# def test_async_text_completion(): -# litellm.set_verbose = True -# print("test_async_text_completion") - -# async def test_get_response(): -# try: -# response = await litellm.atext_completion( -# model="gpt-3.5-turbo-instruct", -# prompt="good morning", -# stream=False, -# max_tokens=10, -# ) -# print(f"response: {response}") -# except litellm.Timeout as e: -# print(e) -# except Exception as e: -# print(e) - -# asyncio.run(test_get_response()) - - -# @pytest.mark.skip(reason="Skip flaky tgai test") -# def test_async_text_completion_together_ai(): -# litellm.set_verbose = True -# print("test_async_text_completion") - -# async def test_get_response(): -# try: -# response = await litellm.atext_completion( -# model="together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1", -# prompt="good morning", -# max_tokens=10, -# ) -# print(f"response: {response}") -# except litellm.Timeout as e: -# print(e) -# except Exception as e: -# pytest.fail("An unexpected error occurred") - -# asyncio.run(test_get_response()) - - -# # test_async_text_completion() - - -# def test_async_text_completion_stream(): -# # tests atext_completion + streaming - assert only one finish reason sent -# litellm.set_verbose = False -# print("test_async_text_completion with stream") - -# async def test_get_response(): -# try: -# response = await litellm.atext_completion( -# model="gpt-3.5-turbo-instruct", -# prompt="good morning", -# stream=True, -# ) -# print(f"response: {response}") - -# num_finish_reason = 0 -# async for chunk in response: -# print(chunk) -# if chunk["choices"][0].get("finish_reason") is not None: -# num_finish_reason += 1 -# print("finish_reason", chunk["choices"][0].get("finish_reason")) - -# assert ( -# num_finish_reason == 1 -# ), f"expected only one finish reason. Got {num_finish_reason}" -# except Exception as e: -# pytest.fail(f"GOT exception for gpt-3.5 instruct In streaming{e}") - -# asyncio.run(test_get_response()) - - -# # test_async_text_completion_stream() - - -# @pytest.mark.asyncio -# async def test_async_text_completion_chat_model_stream(): -# try: -# response = await litellm.atext_completion( -# model="gpt-3.5-turbo", -# prompt="good morning", -# stream=True, -# max_tokens=10, -# ) - -# num_finish_reason = 0 -# chunks = [] -# async for chunk in response: -# print(chunk) -# chunks.append(chunk) -# if chunk["choices"][0].get("finish_reason") is not None: -# num_finish_reason += 1 - -# assert ( -# num_finish_reason == 1 -# ), f"expected only one finish reason. 
Got {num_finish_reason}" -# response_obj = litellm.stream_chunk_builder(chunks=chunks) -# cost = litellm.completion_cost(completion_response=response_obj) -# assert cost > 0 -# except Exception as e: -# pytest.fail(f"GOT exception for gpt-3.5 In streaming{e}") - - -# # asyncio.run(test_async_text_completion_chat_model_stream()) - - -# @pytest.mark.asyncio -# async def test_completion_codestral_fim_api(): -# try: -# litellm.set_verbose = True -# import logging - -# from litellm._logging import verbose_logger - -# verbose_logger.setLevel(level=logging.DEBUG) -# response = await litellm.atext_completion( -# model="text-completion-codestral/codestral-2405", -# prompt="def is_odd(n): \n return n % 2 == 1 \ndef test_is_odd():", -# suffix="return True", -# temperature=0, -# top_p=1, -# max_tokens=10, -# min_tokens=10, -# seed=10, -# stop=["return"], -# ) -# # Add any assertions here to check the response -# print(response) - -# assert response.choices[0].text is not None -# assert len(response.choices[0].text) > 0 - -# # cost = litellm.completion_cost(completion_response=response) -# # print("cost to make mistral completion=", cost) -# # assert cost > 0.0 -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# @pytest.mark.asyncio -# async def test_completion_codestral_fim_api_stream(): -# try: -# import logging - -# from litellm._logging import verbose_logger - -# litellm.set_verbose = False - -# # verbose_logger.setLevel(level=logging.DEBUG) -# response = await litellm.atext_completion( -# model="text-completion-codestral/codestral-2405", -# prompt="def is_odd(n): \n return n % 2 == 1 \ndef test_is_odd():", -# suffix="return True", -# temperature=0, -# top_p=1, -# stream=True, -# seed=10, -# stop=["return"], -# ) - -# full_response = "" -# # Add any assertions here to check the response -# async for chunk in response: -# print(chunk) -# full_response += chunk.get("choices")[0].get("text") or "" - -# print("full_response", full_response) - -# assert len(full_response) > 2 # we at least have a few chars in response :) - -# # cost = litellm.completion_cost(completion_response=response) -# # print("cost to make mistral completion=", cost) -# # assert cost > 0.0 -# except Exception as e: -# pytest.fail(f"Error occurred: {e}") - - -# def mock_post(*args, **kwargs): -# mock_response = MagicMock() -# mock_response.status_code = 200 -# mock_response.headers = {"Content-Type": "application/json"} -# mock_response.model_dump.return_value = { -# "id": "cmpl-7a59383dd4234092b9e5d652a7ab8143", -# "object": "text_completion", -# "created": 1718824735, -# "model": "Sao10K/L3-70B-Euryale-v2.1", -# "choices": [ -# { -# "index": 0, -# "text": ") might be faster than then answering, and the added time it takes for the", -# "logprobs": None, -# "finish_reason": "length", -# "stop_reason": None, -# } -# ], -# "usage": {"prompt_tokens": 2, "total_tokens": 18, "completion_tokens": 16}, -# } -# return mock_response - - -# def test_completion_vllm(): -# """ -# Asserts a text completion call for vllm actually goes to the text completion endpoint -# """ -# from openai import OpenAI - -# client = OpenAI(api_key="my-fake-key") - -# with patch.object(client.completions, "create", side_effect=mock_post) as mock_call: -# response = text_completion( -# model="openai/gemini-1.5-flash", prompt="ping", client=client, hello="world" -# ) -# print(response) - -# assert response.usage.prompt_tokens == 2 - -# mock_call.assert_called_once() - -# assert "hello" in mock_call.call_args.kwargs["extra_body"] + print(response) 
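Before the type definitions below, it helps to see how they are consumed: litellm keeps a list of registered adapters (each entry matching the `AdapterItem` shape introduced next), and `adapter_completion()` looks one up by `adapter_id` to translate the incoming request into the OpenAI format, run `completion()`, and translate the result back. A minimal sketch, assuming the `anthropic_adapter` instance that later patches in this series add, with litellm's `mock_response` kwarg standing in for a real API call:

    import litellm
    from litellm import adapter_completion
    from litellm.adapters.anthropic_adapter import anthropic_adapter

    # Each registered entry matches AdapterItem: {"id": str, "adapter": CustomLogger}
    litellm.adapters = [{"id": "anthropic", "adapter": anthropic_adapter}]

    # adapter_completion() finds the adapter by id, calls
    # translate_completion_input_params() on the kwargs, runs completion(),
    # then translate_completion_output_params() on the model response.
    response = adapter_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
        adapter_id="anthropic",
        mock_response="This is a fake call",  # litellm's built-in mocking
    )
    print(response)
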
diff --git a/litellm/types/adapter.py b/litellm/types/adapter.py new file mode 100644 index 0000000000..2995cfbc1c --- /dev/null +++ b/litellm/types/adapter.py @@ -0,0 +1,10 @@ +from typing import List + +from typing_extensions import Dict, Required, TypedDict, override + +from litellm.integrations.custom_logger import CustomLogger + + +class AdapterItem(TypedDict): + id: str + adapter: CustomLogger diff --git a/litellm/types/llms/anthropic.py b/litellm/types/llms/anthropic.py index 8d8280ea79..7df73377d0 100644 --- a/litellm/types/llms/anthropic.py +++ b/litellm/types/llms/anthropic.py @@ -9,25 +9,27 @@ class AnthropicMessagesToolChoice(TypedDict, total=False): name: str -class AnthopicMessagesAssistantMessageTextContentParam(TypedDict, total=False): - type: Required[Literal["text"]] +class AnthropicMessagesTool(TypedDict, total=False): + name: Required[str] + description: str + input_schema: Required[dict] + +class AnthropicMessagesTextParam(TypedDict): + type: Literal["text"] text: str -class AnthopicMessagesAssistantMessageToolCallParam(TypedDict, total=False): - type: Required[Literal["tool_use"]] - +class AnthropicMessagesToolUseParam(TypedDict): + type: Literal["tool_use"] id: str - name: str - input: dict AnthropicMessagesAssistantMessageValues = Union[ - AnthopicMessagesAssistantMessageTextContentParam, - AnthopicMessagesAssistantMessageToolCallParam, + AnthropicMessagesTextParam, + AnthropicMessagesToolUseParam, ] @@ -46,6 +48,72 @@ class AnthopicMessagesAssistantMessageParam(TypedDict, total=False): """ +class AnthropicImageParamSource(TypedDict): + type: Literal["base64"] + media_type: str + data: str + + +class AnthropicMessagesImageParam(TypedDict): + type: Literal["image"] + source: AnthropicImageParamSource + + +class AnthropicMessagesToolResultContent(TypedDict): + type: Literal["text"] + text: str + + +class AnthropicMessagesToolResultParam(TypedDict, total=False): + type: Required[Literal["tool_result"]] + tool_use_id: Required[str] + is_error: bool + content: Union[ + str, + Iterable[ + Union[AnthropicMessagesToolResultContent, AnthropicMessagesImageParam] + ], + ] + + +AnthropicMessagesUserMessageValues = Union[ + AnthropicMessagesTextParam, + AnthropicMessagesImageParam, + AnthropicMessagesToolResultParam, +] + + +class AnthropicMessagesUserMessageParam(TypedDict, total=False): + role: Required[Literal["user"]] + content: Required[Union[str, Iterable[AnthropicMessagesUserMessageValues]]] + + +class AnthropicMetadata(TypedDict, total=False): + user_id: str + + +class AnthropicMessagesRequest(TypedDict, total=False): + model: Required[str] + messages: Required[ + List[ + Union[ + AnthropicMessagesUserMessageParam, + AnthopicMessagesAssistantMessageParam, + ] + ] + ] + max_tokens: Required[int] + metadata: AnthropicMetadata + stop_sequences: List[str] + stream: bool + system: str + temperature: float + tool_choice: AnthropicMessagesToolChoice + tools: List[AnthropicMessagesTool] + top_k: int + top_p: float + + class ContentTextBlockDelta(TypedDict): """ 'delta': {'type': 'text_delta', 'text': 'Hello'} diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py index 6fc0593b98..63f07f2ca1 100644 --- a/litellm/types/llms/openai.py +++ b/litellm/types/llms/openai.py @@ -305,7 +305,13 @@ class ChatCompletionToolCallFunctionChunk(TypedDict, total=False): arguments: str -class ChatCompletionToolCallChunk(TypedDict): +class ChatCompletionAssistantToolCall(TypedDict): + id: Optional[str] + type: Literal["function"] + function: ChatCompletionToolCallFunctionChunk + 
+ +class ChatCompletionToolCallChunk(TypedDict): # result of /chat/completions call id: Optional[str] type: Literal["function"] function: ChatCompletionToolCallFunctionChunk @@ -319,6 +325,107 @@ class ChatCompletionDeltaToolCallChunk(TypedDict, total=False): index: int +class ChatCompletionTextObject(TypedDict): + type: Literal["text"] + text: str + + +class ChatCompletionImageUrlObject(TypedDict, total=False): + url: Required[str] + detail: str + + +class ChatCompletionImageObject(TypedDict): + type: Literal["image_url"] + image_url: ChatCompletionImageUrlObject + + +class ChatCompletionUserMessage(TypedDict): + role: Literal["user"] + content: Union[ + str, Iterable[Union[ChatCompletionTextObject, ChatCompletionImageObject]] + ] + + +class ChatCompletionAssistantMessage(TypedDict, total=False): + role: Required[Literal["assistant"]] + content: Optional[str] + name: str + tool_calls: List[ChatCompletionAssistantToolCall] + + +class ChatCompletionToolMessage(TypedDict): + role: Literal["tool"] + content: str + tool_call_id: str + + +class ChatCompletionSystemMessage(TypedDict, total=False): + role: Required[Literal["system"]] + content: Required[str] + name: str + + +AllMessageValues = Union[ + ChatCompletionUserMessage, + ChatCompletionAssistantMessage, + ChatCompletionToolMessage, + ChatCompletionSystemMessage, +] + + +class ChatCompletionToolChoiceFunctionParam(TypedDict): + name: str + + +class ChatCompletionToolChoiceObjectParam(TypedDict): + type: Literal["function"] + function: ChatCompletionToolChoiceFunctionParam + + +ChatCompletionToolChoiceStringValues = Literal["none", "auto", "required"] + +ChatCompletionToolChoiceValues = Union[ + ChatCompletionToolChoiceStringValues, ChatCompletionToolChoiceObjectParam +] + + +class ChatCompletionToolParamFunctionChunk(TypedDict, total=False): + name: Required[str] + description: str + parameters: dict + + +class ChatCompletionToolParam(TypedDict): + type: Literal["function"] + function: ChatCompletionToolParamFunctionChunk + + +class ChatCompletionRequest(TypedDict, total=False): + model: Required[str] + messages: Required[List[AllMessageValues]] + frequency_penalty: float + logit_bias: dict + logprobs: bool + top_logprobs: int + max_tokens: int + n: int + presence_penalty: float + response_format: dict + seed: int + service_tier: str + stop: Union[str, List[str]] + stream_options: dict + temperature: float + top_p: float + tools: List[ChatCompletionToolParam] + tool_choice: ChatCompletionToolChoiceValues + parallel_tool_calls: bool + function_call: Union[str, dict] + functions: List + user: str + + class ChatCompletionDeltaChunk(TypedDict, total=False): content: Optional[str] tool_calls: List[ChatCompletionDeltaToolCallChunk] From 4ba30abb630e18457c7cf5101b57dd7f5aeed18b Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 10 Jul 2024 18:15:38 -0700 Subject: [PATCH 3/6] feat(proxy_server.py): working `/v1/messages` endpoint Works with claude engineer --- litellm/adapters/anthropic_adapter.py | 15 +- litellm/integrations/custom_logger.py | 7 +- litellm/llms/anthropic.py | 207 +++++++++------------ litellm/main.py | 3 +- litellm/proxy/auth/user_api_key_auth.py | 11 ++ litellm/proxy/proxy_server.py | 42 ++++- litellm/tests/test_anthropic_completion.py | 43 ++++- litellm/types/llms/anthropic.py | 48 +++++ litellm/types/utils.py | 48 +++-- 9 files changed, 272 insertions(+), 152 deletions(-) diff --git a/litellm/adapters/anthropic_adapter.py b/litellm/adapters/anthropic_adapter.py index ce75755ca0..7d9d799b66 100644 --- 
a/litellm/adapters/anthropic_adapter.py +++ b/litellm/adapters/anthropic_adapter.py @@ -8,11 +8,12 @@ from typing import Literal, Optional import dotenv import httpx +from pydantic import BaseModel import litellm from litellm import ChatCompletionRequest, verbose_logger from litellm.integrations.custom_logger import CustomLogger -from litellm.types.llms.anthropic import AnthropicMessagesRequest +from litellm.types.llms.anthropic import AnthropicMessagesRequest, AnthropicResponse class AnthropicAdapter(CustomLogger): @@ -31,12 +32,18 @@ class AnthropicAdapter(CustomLogger): translated_body = litellm.AnthropicConfig().translate_anthropic_to_openai( anthropic_message_request=request_body ) + return translated_body - def translate_completion_output_params(self, response: litellm.ModelResponse): - return super().translate_completion_output_params(response) + def translate_completion_output_params( + self, response: litellm.ModelResponse + ) -> Optional[AnthropicResponse]: - def translate_completion_output_params_streaming(self): + return litellm.AnthropicConfig().translate_openai_response_to_anthropic( + response=response + ) + + def translate_completion_output_params_streaming(self) -> Optional[BaseModel]: return super().translate_completion_output_params_streaming() diff --git a/litellm/integrations/custom_logger.py b/litellm/integrations/custom_logger.py index 4c3fa3a137..be02637041 100644 --- a/litellm/integrations/custom_logger.py +++ b/litellm/integrations/custom_logger.py @@ -5,6 +5,7 @@ import traceback from typing import Literal, Optional, Union import dotenv +from pydantic import BaseModel from litellm.caching import DualCache from litellm.proxy._types import UserAPIKeyAuth @@ -67,13 +68,15 @@ class CustomLogger: # https://docs.litellm.ai/docs/observability/custom_callbac """ pass - def translate_completion_output_params(self, response: ModelResponse): + def translate_completion_output_params( + self, response: ModelResponse + ) -> Optional[BaseModel]: """ Translates the output params, from the OpenAI format to the custom format. """ pass - def translate_completion_output_params_streaming(self): + def translate_completion_output_params_streaming(self) -> Optional[BaseModel]: """ Translates the streaming chunk, from the OpenAI format to the custom format. """ diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index 02e222b905..e4d6338ef7 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -21,10 +21,15 @@ from litellm.llms.custom_httpx.http_handler import ( ) from litellm.types.llms.anthropic import ( AnthopicMessagesAssistantMessageParam, + AnthropicFinishReason, AnthropicMessagesRequest, AnthropicMessagesTool, AnthropicMessagesToolChoice, AnthropicMessagesUserMessageParam, + AnthropicResponse, + AnthropicResponseContentBlockText, + AnthropicResponseContentBlockToolUse, + AnthropicResponseUsageBlock, ContentBlockDelta, ContentBlockStart, MessageBlockDelta, @@ -51,7 +56,7 @@ from litellm.types.llms.openai import ( ChatCompletionUsageBlock, ChatCompletionUserMessage, ) -from litellm.types.utils import GenericStreamingChunk +from litellm.types.utils import Choices, GenericStreamingChunk from litellm.utils import CustomStreamWrapper, ModelResponse, Usage from .base import BaseLLM @@ -187,6 +192,8 @@ class AnthropicConfig: optional_params["top_p"] = value return optional_params + ### FOR [BETA] `/v1/messages` endpoint support + def translatable_anthropic_params(self) -> List: """ Which anthropic params, we need to translate to the openai format. 
@@ -300,10 +307,14 @@ class AnthropicConfig: ) ) - assistant_message = ChatCompletionAssistantMessage( - role="assistant", content=assistant_message_str, tool_calls=tool_calls - ) - new_messages.append(assistant_message) + if assistant_message_str is not None or len(tool_calls) > 0: + assistant_message = ChatCompletionAssistantMessage( + role="assistant", + content=assistant_message_str, + ) + if len(tool_calls) > 0: + assistant_message["tool_calls"] = tool_calls + new_messages.append(assistant_message) return new_messages @@ -391,6 +402,77 @@ class AnthropicConfig: return new_kwargs + def _translate_openai_content_to_anthropic( + self, choices: List[Choices] + ) -> List[ + Union[AnthropicResponseContentBlockText, AnthropicResponseContentBlockToolUse] + ]: + new_content: List[ + Union[ + AnthropicResponseContentBlockText, AnthropicResponseContentBlockToolUse + ] + ] = [] + for choice in choices: + if ( + choice.message.tool_calls is not None + and len(choice.message.tool_calls) > 0 + ): + for tool_call in choice.message.tool_calls: + new_content.append( + AnthropicResponseContentBlockToolUse( + type="tool_use", + id=tool_call.id, + name=tool_call.function.name, + input=tool_call.function.arguments, + ) + ) + elif choice.message.content is not None: + new_content.append( + AnthropicResponseContentBlockText( + type="text", text=choice.message.content + ) + ) + + return new_content + + def _translate_openai_finish_reason_to_anthropic( + self, openai_finish_reason: str + ) -> AnthropicFinishReason: + if openai_finish_reason == "stop": + return "end_turn" + elif openai_finish_reason == "length": + return "max_tokens" + elif openai_finish_reason == "tool_calls": + return "tool_use" + return "end_turn" + + def translate_openai_response_to_anthropic( + self, response: litellm.ModelResponse + ) -> AnthropicResponse: + ## translate content block + anthropic_content = self._translate_openai_content_to_anthropic(choices=response.choices) # type: ignore + ## extract finish reason + anthropic_finish_reason = self._translate_openai_finish_reason_to_anthropic( + openai_finish_reason=response.choices[0].finish_reason # type: ignore + ) + # extract usage + usage: litellm.Usage = getattr(response, "usage") + anthropic_usage = AnthropicResponseUsageBlock( + input_tokens=usage.prompt_tokens, output_tokens=usage.completion_tokens + ) + translated_obj = AnthropicResponse( + id=response.id, + type="message", + role="assistant", + model=response.model or "unknown-model", + stop_sequence=None, + usage=anthropic_usage, + content=anthropic_content, + stop_reason=anthropic_finish_reason, + ) + + return translated_obj + # makes headers for API call def validate_environment(api_key, user_headers): @@ -454,121 +536,6 @@ class AnthropicChatCompletion(BaseLLM): def __init__(self) -> None: super().__init__() - # def process_streaming_response( - # self, - # model: str, - # response: Union[requests.Response, httpx.Response], - # model_response: ModelResponse, - # stream: bool, - # logging_obj: litellm.litellm_core_utils.litellm_logging.Logging, - # optional_params: dict, - # api_key: str, - # data: Union[dict, str], - # messages: List, - # print_verbose, - # encoding, - # ) -> CustomStreamWrapper: - # """ - # Return stream object for tool-calling + streaming - # """ - # ## LOGGING - # logging_obj.post_call( - # input=messages, - # api_key=api_key, - # original_response=response.text, - # additional_args={"complete_input_dict": data}, - # ) - # print_verbose(f"raw model_response: {response.text}") - # ## RESPONSE OBJECT - # 
try: - # completion_response = response.json() - # except: - # raise AnthropicError( - # message=response.text, status_code=response.status_code - # ) - # text_content = "" - # tool_calls = [] - # for content in completion_response["content"]: - # if content["type"] == "text": - # text_content += content["text"] - # ## TOOL CALLING - # elif content["type"] == "tool_use": - # tool_calls.append( - # { - # "id": content["id"], - # "type": "function", - # "function": { - # "name": content["name"], - # "arguments": json.dumps(content["input"]), - # }, - # } - # ) - # if "error" in completion_response: - # raise AnthropicError( - # message=str(completion_response["error"]), - # status_code=response.status_code, - # ) - # _message = litellm.Message( - # tool_calls=tool_calls, - # content=text_content or None, - # ) - # model_response.choices[0].message = _message # type: ignore - # model_response._hidden_params["original_response"] = completion_response[ - # "content" - # ] # allow user to access raw anthropic tool calling response - - # model_response.choices[0].finish_reason = map_finish_reason( - # completion_response["stop_reason"] - # ) - - # print_verbose("INSIDE ANTHROPIC STREAMING TOOL CALLING CONDITION BLOCK") - # # return an iterator - # streaming_model_response = ModelResponse(stream=True) - # streaming_model_response.choices[0].finish_reason = model_response.choices[ # type: ignore - # 0 - # ].finish_reason - # # streaming_model_response.choices = [litellm.utils.StreamingChoices()] - # streaming_choice = litellm.utils.StreamingChoices() - # streaming_choice.index = model_response.choices[0].index - # _tool_calls = [] - # print_verbose( - # f"type of model_response.choices[0]: {type(model_response.choices[0])}" - # ) - # print_verbose(f"type of streaming_choice: {type(streaming_choice)}") - # if isinstance(model_response.choices[0], litellm.Choices): - # if getattr( - # model_response.choices[0].message, "tool_calls", None - # ) is not None and isinstance( - # model_response.choices[0].message.tool_calls, list - # ): - # for tool_call in model_response.choices[0].message.tool_calls: - # _tool_call = {**tool_call.dict(), "index": 0} - # _tool_calls.append(_tool_call) - # delta_obj = litellm.utils.Delta( - # content=getattr(model_response.choices[0].message, "content", None), - # role=model_response.choices[0].message.role, - # tool_calls=_tool_calls, - # ) - # streaming_choice.delta = delta_obj - # streaming_model_response.choices = [streaming_choice] - # completion_stream = ModelResponseIterator( - # model_response=streaming_model_response - # ) - # print_verbose( - # "Returns anthropic CustomStreamWrapper with 'cached_response' streaming object" - # ) - # return CustomStreamWrapper( - # completion_stream=completion_stream, - # model=model, - # custom_llm_provider="cached_response", - # logging_obj=logging_obj, - # ) - # else: - # raise AnthropicError( - # status_code=422, - # message="Unprocessable response object - {}".format(response.text), - # ) - def process_response( self, model: str, diff --git a/litellm/main.py b/litellm/main.py index bb203ae4a9..74a86e1e5d 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -38,6 +38,7 @@ import dotenv import httpx import openai import tiktoken +from pydantic import BaseModel from typing_extensions import overload import litellm @@ -3947,7 +3948,7 @@ def text_completion( ###### Adapter Completion ################ -def adapter_completion(*, adapter_id: str, **kwargs) -> Any: +def adapter_completion(*, adapter_id: str, **kwargs) -> 
Optional[BaseModel]: translation_obj: Optional[CustomLogger] = None for item in litellm.adapters: if item["id"] == adapter_id: diff --git a/litellm/proxy/auth/user_api_key_auth.py b/litellm/proxy/auth/user_api_key_auth.py index 28d67ad8cf..4b931a2726 100644 --- a/litellm/proxy/auth/user_api_key_auth.py +++ b/litellm/proxy/auth/user_api_key_auth.py @@ -71,6 +71,11 @@ azure_api_key_header = APIKeyHeader( auto_error=False, description="Some older versions of the openai Python package will send an API-Key header with just the API key ", ) +anthropic_api_key_header = APIKeyHeader( + name="x-api-key", + auto_error=False, + description="If anthropic client used.", +) def _get_bearer_token( @@ -87,6 +92,9 @@ async def user_api_key_auth( request: Request, api_key: str = fastapi.Security(api_key_header), azure_api_key_header: str = fastapi.Security(azure_api_key_header), + anthropic_api_key_header: Optional[str] = fastapi.Security( + anthropic_api_key_header + ), ) -> UserAPIKeyAuth: from litellm.proxy.proxy_server import ( @@ -114,6 +122,9 @@ async def user_api_key_auth( elif isinstance(azure_api_key_header, str): api_key = azure_api_key_header + elif isinstance(anthropic_api_key_header, str): + api_key = anthropic_api_key_header + parent_otel_span: Optional[Span] = None if open_telemetry_logger is not None: parent_otel_span = open_telemetry_logger.tracer.start_span( diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 581cd9229d..ea1fa8f756 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -210,6 +210,12 @@ from litellm.router import ( from litellm.router import ModelInfo as RouterModelInfo from litellm.router import updateDeployment from litellm.scheduler import DefaultPriorities, FlowItem, Scheduler +from litellm.types.llms.anthropic import ( + AnthropicMessagesRequest, + AnthropicResponse, + AnthropicResponseContentBlockText, + AnthropicResponseUsageBlock, +) from litellm.types.llms.openai import HttpxBinaryResponseContent from litellm.types.router import RouterGeneralSettings @@ -5030,6 +5036,34 @@ async def moderations( ) +#### ANTHROPIC ENDPOINTS #### + + +@router.post( + "/v1/messages", + tags=["[beta] Anthropic `/v1/messages`"], + dependencies=[Depends(user_api_key_auth)], + response_model=AnthropicResponse, +) +async def anthropic_response(data: AnthropicMessagesRequest): + from litellm import adapter_completion + from litellm.adapters.anthropic_adapter import anthropic_adapter + + litellm.adapters = [{"id": "anthropic", "adapter": anthropic_adapter}] + + response: Optional[BaseModel] = adapter_completion(adapter_id="anthropic", **data) + + if response is None: + raise Exception("Response is None.") + elif not isinstance(response, AnthropicResponse): + raise Exception( + "Invalid model response={}. 
Not in 'AnthropicResponse' format".format( + response + ) + ) + return response + + #### DEV UTILS #### # @router.get( @@ -7546,7 +7580,7 @@ async def login(request: Request): litellm_dashboard_ui += "/ui/" import jwt - jwt_token = jwt.encode( + jwt_token = jwt.encode( # type: ignore { "user_id": user_id, "key": key, @@ -7610,7 +7644,7 @@ async def login(request: Request): litellm_dashboard_ui += "/ui/" import jwt - jwt_token = jwt.encode( + jwt_token = jwt.encode( # type: ignore { "user_id": user_id, "key": key, @@ -7745,7 +7779,7 @@ async def onboarding(invite_link: str): litellm_dashboard_ui += "/ui/onboarding" import jwt - jwt_token = jwt.encode( + jwt_token = jwt.encode( # type: ignore { "user_id": user_obj.user_id, "key": key, @@ -8162,7 +8196,7 @@ async def auth_callback(request: Request): import jwt - jwt_token = jwt.encode( + jwt_token = jwt.encode( # type: ignore { "user_id": user_id, "key": key, diff --git a/litellm/tests/test_anthropic_completion.py b/litellm/tests/test_anthropic_completion.py index 25d5823c32..4bd33e04c3 100644 --- a/litellm/tests/test_anthropic_completion.py +++ b/litellm/tests/test_anthropic_completion.py @@ -20,16 +20,51 @@ from unittest.mock import MagicMock, patch import pytest import litellm -from litellm import adapter_completion +from litellm import AnthropicConfig, adapter_completion from litellm.adapters.anthropic_adapter import anthropic_adapter +from litellm.types.llms.anthropic import AnthropicResponse -def test_anthropic_completion(): +def test_anthropic_completion_messages_translation(): + messages = [{"role": "user", "content": "Hey, how's it going?"}] + + translated_messages = AnthropicConfig().translate_anthropic_messages_to_openai(messages=messages) # type: ignore + + assert translated_messages == [{"role": "user", "content": "Hey, how's it going?"}] + + +def test_anthropic_completion_input_translation(): + data = { + "model": "gpt-3.5-turbo", + "messages": [{"role": "user", "content": "Hey, how's it going?"}], + } + translated_input = anthropic_adapter.translate_completion_input_params(kwargs=data) + + assert translated_input is not None + + assert translated_input["model"] == "gpt-3.5-turbo" + assert translated_input["messages"] == [ + {"role": "user", "content": "Hey, how's it going?"} + ] + + +def test_anthropic_completion_e2e(): + litellm.set_verbose = True + litellm.adapters = [{"id": "anthropic", "adapter": anthropic_adapter}] messages = [{"role": "user", "content": "Hey, how's it going?"}] response = adapter_completion( - model="gpt-3.5-turbo", messages=messages, adapter_id="anthropic" + model="gpt-3.5-turbo", + messages=messages, + adapter_id="anthropic", + mock_response="This is a fake call", ) - print(response) + print("Response: {}".format(response)) + + assert response is not None + + assert isinstance(response, AnthropicResponse) + + assert False diff --git a/litellm/types/llms/anthropic.py b/litellm/types/llms/anthropic.py index 7df73377d0..00cad587ca 100644 --- a/litellm/types/llms/anthropic.py +++ b/litellm/types/llms/anthropic.py @@ -223,3 +223,51 @@ class MessageStartBlock(TypedDict): type: Literal["message_start"] message: MessageChunk + + +class AnthropicResponseContentBlockText(BaseModel): + type: Literal["text"] + text: str + + +class AnthropicResponseContentBlockToolUse(BaseModel): + type: Literal["tool_use"] + id: str + name: str + input: str + + +class AnthropicResponseUsageBlock(BaseModel): + input_tokens: int + output_tokens: int + + +AnthropicFinishReason = Literal["end_turn", "max_tokens", "stop_sequence", 
"tool_use"] + + +class AnthropicResponse(BaseModel): + id: str + """Unique object identifier.""" + + type: Literal["message"] + """For Messages, this is always "message".""" + + role: Literal["assistant"] + """Conversational role of the generated message. This will always be "assistant".""" + + content: List[ + Union[AnthropicResponseContentBlockText, AnthropicResponseContentBlockToolUse] + ] + """Content generated by the model.""" + + model: str + """The model that handled the request.""" + + stop_reason: Optional[AnthropicFinishReason] + """The reason that we stopped.""" + + stop_sequence: Optional[str] + """Which custom stop sequence was generated, if any.""" + + usage: AnthropicResponseUsageBlock + """Billing and rate-limit usage.""" diff --git a/litellm/types/utils.py b/litellm/types/utils.py index a90f93484f..5f31798da9 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -166,7 +166,7 @@ class FunctionCall(OpenAIObject): class Function(OpenAIObject): arguments: str - name: Optional[str] = None + name: str def __init__( self, @@ -280,29 +280,43 @@ class ChatCompletionMessageToolCall(OpenAIObject): setattr(self, key, value) +""" +Reference: +ChatCompletionMessage(content='This is a test', role='assistant', function_call=None, tool_calls=None)) +""" + + class Message(OpenAIObject): + + content: Optional[str] + role: Literal["assistant"] + tool_calls: Optional[List[ChatCompletionMessageToolCall]] + function_call: Optional[FunctionCall] + def __init__( self, - content: Optional[str] = "default", - role="assistant", - logprobs=None, + content: Optional[str] = None, + role: Literal["assistant"] = "assistant", function_call=None, tool_calls=None, **params, ): - super(Message, self).__init__(**params) - self.content = content - self.role = role - if function_call is not None: - self.function_call = FunctionCall(**function_call) - - if tool_calls is not None: - self.tool_calls = [] - for tool_call in tool_calls: - self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call)) - - if logprobs is not None: - self._logprobs = ChoiceLogprobs(**logprobs) + init_values = { + "content": content, + "role": role, + "function_call": ( + FunctionCall(**function_call) if function_call is not None else None + ), + "tool_calls": ( + [ChatCompletionMessageToolCall(**tool_call) for tool_call in tool_calls] + if tool_calls is not None + else None + ), + } + super(Message, self).__init__( + **init_values, + **params, + ) def get(self, key, default=None): # Custom .get() method to access attributes with a default value if the attribute doesn't exist From 48be4ce80576adee08cc3df36a3651c0769b1019 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 10 Jul 2024 18:53:54 -0700 Subject: [PATCH 4/6] feat(proxy_server.py): working `/v1/messages` with config.yaml Adds async router support for adapter_completion call --- litellm/llms/anthropic.py | 2 +- litellm/main.py | 30 ++++ litellm/proxy/_new_secret_config.yaml | 4 +- litellm/proxy/proxy_server.py | 182 ++++++++++++++++++++- litellm/router.py | 119 ++++++++++++++ litellm/tests/test_anthropic_completion.py | 37 ++++- litellm/types/llms/anthropic.py | 2 +- 7 files changed, 362 insertions(+), 14 deletions(-) diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index e4d6338ef7..5fe527c694 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -423,7 +423,7 @@ class AnthropicConfig: type="tool_use", id=tool_call.id, name=tool_call.function.name, - input=tool_call.function.arguments, + 
input=json.loads(tool_call.function.arguments), ) ) elif choice.message.content is not None: diff --git a/litellm/main.py b/litellm/main.py index 74a86e1e5d..949466642c 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -3948,6 +3948,36 @@ def text_completion( ###### Adapter Completion ################ +async def aadapter_completion(*, adapter_id: str, **kwargs) -> Optional[BaseModel]: + """ + Implemented to handle async calls for adapter_completion() + """ + try: + translation_obj: Optional[CustomLogger] = None + for item in litellm.adapters: + if item["id"] == adapter_id: + translation_obj = item["adapter"] + + if translation_obj is None: + raise ValueError( + "No matching adapter given. Received 'adapter_id'={}, litellm.adapters={}".format( + adapter_id, litellm.adapters + ) + ) + + new_kwargs = translation_obj.translate_completion_input_params(kwargs=kwargs) + + response: ModelResponse = await acompletion(**new_kwargs) # type: ignore + + translated_response = translation_obj.translate_completion_output_params( + response=response + ) + + return translated_response + except Exception as e: + raise e + + def adapter_completion(*, adapter_id: str, **kwargs) -> Optional[BaseModel]: translation_obj: Optional[CustomLogger] = None for item in litellm.adapters: diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 49db7c3787..0f1f981d7a 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -2,7 +2,9 @@ model_list: - model_name: "*" litellm_params: model: "openai/*" - + - model_name: claude-3-5-sonnet-20240620 + litellm_params: + model: gpt-3.5-turbo general_settings: diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index ea1fa8f756..4bef33cbc4 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -5045,23 +5045,187 @@ async def moderations( dependencies=[Depends(user_api_key_auth)], response_model=AnthropicResponse, ) -async def anthropic_response(data: AnthropicMessagesRequest): +async def anthropic_response( + anthropic_data: AnthropicMessagesRequest, + fastapi_response: Response, + request: Request, + user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), +): from litellm import adapter_completion from litellm.adapters.anthropic_adapter import anthropic_adapter litellm.adapters = [{"id": "anthropic", "adapter": anthropic_adapter}] - response: Optional[BaseModel] = adapter_completion(adapter_id="anthropic", **data) + global user_temperature, user_request_timeout, user_max_tokens, user_api_base + data: dict = {**anthropic_data, "adapter_id": "anthropic"} + try: + data["model"] = ( + general_settings.get("completion_model", None) # server default + or user_model # model name passed via cli args + or data["model"] # default passed in http request + ) + if user_model: + data["model"] = user_model - if response is None: - raise Exception("Response is None.") - elif not isinstance(response, AnthropicResponse): - raise Exception( - "Invalid model response={}. 
Not in 'AnthropicResponse' format".format( - response + data = await add_litellm_data_to_request( + data=data, # type: ignore + request=request, + general_settings=general_settings, + user_api_key_dict=user_api_key_dict, + version=version, + proxy_config=proxy_config, + ) + + # override with user settings, these are params passed via cli + if user_temperature: + data["temperature"] = user_temperature + if user_request_timeout: + data["request_timeout"] = user_request_timeout + if user_max_tokens: + data["max_tokens"] = user_max_tokens + if user_api_base: + data["api_base"] = user_api_base + + ### MODEL ALIAS MAPPING ### + # check if model name in model alias map + # get the actual model name + if data["model"] in litellm.model_alias_map: + data["model"] = litellm.model_alias_map[data["model"]] + + ### CALL HOOKS ### - modify incoming data before calling the model + data = await proxy_logging_obj.pre_call_hook( # type: ignore + user_api_key_dict=user_api_key_dict, data=data, call_type="text_completion" + ) + + ### ROUTE THE REQUESTs ### + router_model_names = llm_router.model_names if llm_router is not None else [] + # skip router if user passed their key + if "api_key" in data: + llm_response = asyncio.create_task(litellm.aadapter_completion(**data)) + elif ( + llm_router is not None and data["model"] in router_model_names + ): # model in router model list + llm_response = asyncio.create_task(llm_router.aadapter_completion(**data)) + elif ( + llm_router is not None + and llm_router.model_group_alias is not None + and data["model"] in llm_router.model_group_alias + ): # model set in model_group_alias + llm_response = asyncio.create_task(llm_router.aadapter_completion(**data)) + elif ( + llm_router is not None and data["model"] in llm_router.deployment_names + ): # model in router deployments, calling a specific deployment on the router + llm_response = asyncio.create_task( + llm_router.aadapter_completion(**data, specific_deployment=True) + ) + elif ( + llm_router is not None and data["model"] in llm_router.get_model_ids() + ): # model in router model list + llm_response = asyncio.create_task(llm_router.aadapter_completion(**data)) + elif ( + llm_router is not None + and data["model"] not in router_model_names + and llm_router.default_deployment is not None + ): # model in router deployments, calling a specific deployment on the router + llm_response = asyncio.create_task(llm_router.aadapter_completion(**data)) + elif user_model is not None: # `litellm --model ` + llm_response = asyncio.create_task(litellm.aadapter_completion(**data)) + else: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail={ + "error": "completion: Invalid model name passed in model=" + + data.get("model", "") + }, + ) + + # Await the llm_response task + response = await llm_response + + hidden_params = getattr(response, "_hidden_params", {}) or {} + model_id = hidden_params.get("model_id", None) or "" + cache_key = hidden_params.get("cache_key", None) or "" + api_base = hidden_params.get("api_base", None) or "" + response_cost = hidden_params.get("response_cost", None) or "" + + ### ALERTING ### + asyncio.create_task( + proxy_logging_obj.update_request_status( + litellm_call_id=data.get("litellm_call_id", ""), status="success" ) ) - return response + + verbose_proxy_logger.debug("final response: %s", response) + + fastapi_response.headers.update( + get_custom_headers( + user_api_key_dict=user_api_key_dict, + model_id=model_id, + cache_key=cache_key, + api_base=api_base, + version=version, + 
response_cost=response_cost, + ) + ) + + verbose_proxy_logger.info("\nResponse from Litellm:\n{}".format(response)) + return response + except RejectedRequestError as e: + _data = e.request_data + await proxy_logging_obj.post_call_failure_hook( + user_api_key_dict=user_api_key_dict, + original_exception=e, + request_data=_data, + ) + if _data.get("stream", None) is not None and _data["stream"] == True: + _chat_response = litellm.ModelResponse() + _usage = litellm.Usage( + prompt_tokens=0, + completion_tokens=0, + total_tokens=0, + ) + _chat_response.usage = _usage # type: ignore + _chat_response.choices[0].message.content = e.message # type: ignore + _iterator = litellm.utils.ModelResponseIterator( + model_response=_chat_response, convert_to_delta=True + ) + _streaming_response = litellm.TextCompletionStreamWrapper( + completion_stream=_iterator, + model=_data.get("model", ""), + ) + + selected_data_generator = select_data_generator( + response=_streaming_response, + user_api_key_dict=user_api_key_dict, + request_data=data, + ) + + return StreamingResponse( + selected_data_generator, + media_type="text/event-stream", + headers={}, + ) + else: + _response = litellm.TextCompletionResponse() + _response.choices[0].text = e.message + return _response + except Exception as e: + await proxy_logging_obj.post_call_failure_hook( + user_api_key_dict=user_api_key_dict, original_exception=e, request_data=data + ) + verbose_proxy_logger.error( + "litellm.proxy.proxy_server.completion(): Exception occured - {}".format( + str(e) + ) + ) + verbose_proxy_logger.debug(traceback.format_exc()) + error_msg = f"{str(e)}" + raise ProxyException( + message=getattr(e, "message", error_msg), + type=getattr(e, "type", "None"), + param=getattr(e, "param", "None"), + code=getattr(e, "status_code", 500), + ) #### DEV UTILS #### diff --git a/litellm/router.py b/litellm/router.py index 944ae93779..762f8c354c 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -1764,6 +1764,125 @@ class Router: self.fail_calls[model] += 1 raise e + async def aadapter_completion( + self, + adapter_id: str, + model: str, + is_retry: Optional[bool] = False, + is_fallback: Optional[bool] = False, + is_async: Optional[bool] = False, + **kwargs, + ): + try: + kwargs["model"] = model + kwargs["adapter_id"] = adapter_id + kwargs["original_function"] = self._aadapter_completion + kwargs["num_retries"] = kwargs.get("num_retries", self.num_retries) + timeout = kwargs.get("request_timeout", self.timeout) + kwargs.setdefault("metadata", {}).update({"model_group": model}) + response = await self.async_function_with_fallbacks(**kwargs) + + return response + except Exception as e: + asyncio.create_task( + send_llm_exception_alert( + litellm_router_instance=self, + request_kwargs=kwargs, + error_traceback_str=traceback.format_exc(), + original_exception=e, + ) + ) + raise e + + async def _aadapter_completion(self, adapter_id: str, model: str, **kwargs): + try: + verbose_router_logger.debug( + f"Inside _aadapter_completion()- model: {model}; kwargs: {kwargs}" + ) + deployment = await self.async_get_available_deployment( + model=model, + messages=[{"role": "user", "content": "default text"}], + specific_deployment=kwargs.pop("specific_deployment", None), + ) + kwargs.setdefault("metadata", {}).update( + { + "deployment": deployment["litellm_params"]["model"], + "model_info": deployment.get("model_info", {}), + "api_base": deployment.get("litellm_params", {}).get("api_base"), + } + ) + kwargs["model_info"] = deployment.get("model_info", {}) + data = 
deployment["litellm_params"].copy() + model_name = data["model"] + for k, v in self.default_litellm_params.items(): + if ( + k not in kwargs + ): # prioritize model-specific params > default router params + kwargs[k] = v + elif k == "metadata": + kwargs[k].update(v) + + potential_model_client = self._get_client( + deployment=deployment, kwargs=kwargs, client_type="async" + ) + # check if provided keys == client keys # + dynamic_api_key = kwargs.get("api_key", None) + if ( + dynamic_api_key is not None + and potential_model_client is not None + and dynamic_api_key != potential_model_client.api_key + ): + model_client = None + else: + model_client = potential_model_client + self.total_calls[model_name] += 1 + + response = litellm.aadapter_completion( + **{ + **data, + "adapter_id": adapter_id, + "caching": self.cache_responses, + "client": model_client, + "timeout": self.timeout, + **kwargs, + } + ) + + rpm_semaphore = self._get_client( + deployment=deployment, + kwargs=kwargs, + client_type="max_parallel_requests", + ) + + if rpm_semaphore is not None and isinstance( + rpm_semaphore, asyncio.Semaphore + ): + async with rpm_semaphore: + """ + - Check rpm limits before making the call + - If allowed, increment the rpm limit (allows global value to be updated, concurrency-safe) + """ + await self.async_routing_strategy_pre_call_checks( + deployment=deployment + ) + response = await response # type: ignore + else: + await self.async_routing_strategy_pre_call_checks(deployment=deployment) + response = await response # type: ignore + + self.success_calls[model_name] += 1 + verbose_router_logger.info( + f"litellm.aadapter_completion(model={model_name})\033[32m 200 OK\033[0m" + ) + return response + except Exception as e: + verbose_router_logger.info( + f"litellm.aadapter_completion(model={model})\033[31m Exception {str(e)}\033[0m" + ) + if model is not None: + self.fail_calls[model] += 1 + raise e + def embedding( self, model: str, diff --git a/litellm/tests/test_anthropic_completion.py b/litellm/tests/test_anthropic_completion.py index 4bd33e04c3..cac0945d8d 100644 --- a/litellm/tests/test_anthropic_completion.py +++ b/litellm/tests/test_anthropic_completion.py @@ -20,7 +20,7 @@ from unittest.mock import MagicMock, patch import pytest import litellm -from litellm import AnthropicConfig, adapter_completion +from litellm import AnthropicConfig, Router, adapter_completion from litellm.adapters.anthropic_adapter import anthropic_adapter from litellm.types.llms.anthropic import AnthropicResponse @@ -67,4 +67,37 @@ def test_anthropic_completion_e2e(): assert isinstance(response, AnthropicResponse) - assert False + +@pytest.mark.asyncio +async def test_anthropic_router_completion_e2e(): + litellm.set_verbose = True + + litellm.adapters = [{"id": "anthropic", "adapter": anthropic_adapter}] + + router = Router( + model_list=[ + { + "model_name": "claude-3-5-sonnet-20240620", + "litellm_params": { + "model": "gpt-3.5-turbo", + "mock_response": "hi this is macintosh.", + }, + } + ] + ) + messages = [{"role": "user", "content": "Hey, how's it going?"}] + + response = await router.aadapter_completion( + model="claude-3-5-sonnet-20240620", + messages=messages, + adapter_id="anthropic", + mock_response="This is a fake call", + ) + + print("Response: {}".format(response)) + + assert response is not None + + assert isinstance(response, AnthropicResponse) + + assert response.model == "gpt-3.5-turbo" diff --git a/litellm/types/llms/anthropic.py b/litellm/types/llms/anthropic.py index 00cad587ca..33f413eced 100644 
--- a/litellm/types/llms/anthropic.py +++ b/litellm/types/llms/anthropic.py @@ -234,7 +234,7 @@ class AnthropicResponseContentBlockToolUse(BaseModel): type: Literal["tool_use"] id: str name: str - input: str + input: dict class AnthropicResponseUsageBlock(BaseModel): From af1064941a4801f3e80a610229ae459b2968f7b0 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 10 Jul 2024 21:56:47 -0700 Subject: [PATCH 5/6] fix(types/utils.py): fix streaming function name --- litellm/types/utils.py | 4 +++- litellm/utils.py | 14 +++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/litellm/types/utils.py b/litellm/types/utils.py index 5f31798da9..4ae88a7450 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -166,7 +166,9 @@ class FunctionCall(OpenAIObject): class Function(OpenAIObject): arguments: str - name: str + name: Optional[ + str + ] # can be None - openai e.g.: ChoiceDeltaToolCallFunction(arguments='{"', name=None), type=None) def __init__( self, diff --git a/litellm/utils.py b/litellm/utils.py index cf2c679a84..39ddc02ac9 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -8126,7 +8126,7 @@ class CustomStreamWrapper: if chunk.startswith(self.complete_response): # Remove last_sent_chunk only if it appears at the start of the new chunk - chunk = chunk[len(self.complete_response):] + chunk = chunk[len(self.complete_response) :] self.complete_response += chunk return chunk @@ -9483,8 +9483,8 @@ class CustomStreamWrapper: model_response.choices[0].delta = Delta(**_json_delta) except Exception as e: verbose_logger.error( - "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}".format( - str(e) + "litellm.CustomStreamWrapper.chunk_creator(): Exception occured - {}\n{}".format( + str(e), traceback.format_exc() ) ) verbose_logger.debug(traceback.format_exc()) @@ -10124,7 +10124,7 @@ def mock_completion_streaming_obj( model_response, mock_response, model, n: Optional[int] = None ): for i in range(0, len(mock_response), 3): - completion_obj = Delta(role="assistant", content=mock_response[i: i + 3]) + completion_obj = Delta(role="assistant", content=mock_response[i : i + 3]) if n is None: model_response.choices[0].delta = completion_obj else: @@ -10133,7 +10133,7 @@ def mock_completion_streaming_obj( _streaming_choice = litellm.utils.StreamingChoices( index=j, delta=litellm.utils.Delta( - role="assistant", content=mock_response[i: i + 3] + role="assistant", content=mock_response[i : i + 3] ), ) _all_choices.append(_streaming_choice) @@ -10145,7 +10145,7 @@ async def async_mock_completion_streaming_obj( model_response, mock_response, model, n: Optional[int] = None ): for i in range(0, len(mock_response), 3): - completion_obj = Delta(role="assistant", content=mock_response[i: i + 3]) + completion_obj = Delta(role="assistant", content=mock_response[i : i + 3]) if n is None: model_response.choices[0].delta = completion_obj else: @@ -10154,7 +10154,7 @@ async def async_mock_completion_streaming_obj( _streaming_choice = litellm.utils.StreamingChoices( index=j, delta=litellm.utils.Delta( - role="assistant", content=mock_response[i: i + 3] + role="assistant", content=mock_response[i : i + 3] ), ) _all_choices.append(_streaming_choice) From c46e3ce590b8afcd3f64df6da5d5db0dc396e74b Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 10 Jul 2024 22:14:23 -0700 Subject: [PATCH 6/6] fix: fix linting error --- litellm/llms/anthropic.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/llms/anthropic.py 
b/litellm/llms/anthropic.py index 5fe527c694..ca93a85b71 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -422,7 +422,7 @@ class AnthropicConfig: AnthropicResponseContentBlockToolUse( type="tool_use", id=tool_call.id, - name=tool_call.function.name, + name=tool_call.function.name or "", input=json.loads(tool_call.function.arguments), ) )
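
End-to-end, the series leaves the proxy able to serve Anthropic-style traffic: `/v1/messages` accepts an `AnthropicMessagesRequest`, auth also honors the `x-api-key` header added in `user_api_key_auth.py`, and `config.yaml` can alias a `claude-*` model name onto any configured deployment. A hedged sketch of exercising the endpoint — the URL, key, and model name are placeholders for whatever the proxy's `config.yaml` actually serves:

    import httpx

    # x-api-key is the header registered via anthropic_api_key_header in
    # user_api_key_auth.py; a standard Bearer Authorization header also works.
    resp = httpx.post(
        "http://0.0.0.0:4000/v1/messages",  # assumed local proxy address
        headers={"x-api-key": "sk-1234"},  # placeholder proxy key
        json={
            "model": "claude-3-5-sonnet-20240620",  # aliased in config.yaml's model_list
            "max_tokens": 256,  # a Required field on AnthropicMessagesRequest
            "messages": [{"role": "user", "content": "Hey, how's it going?"}],
        },
    )
    # The body validates against the AnthropicResponse model:
    # {"id": ..., "type": "message", "role": "assistant", "content": [...], "usage": {...}}
    print(resp.json())

Because the endpoint routes through `aadapter_completion()` on the router, the request above hits whichever deployment `claude-3-5-sonnet-20240620` maps to (in the sample config, `gpt-3.5-turbo`), while the caller still sends and receives Anthropic-shaped payloads.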