diff --git a/docs/my-website/docs/routing.md b/docs/my-website/docs/routing.md index e65dc4dce..fb16c4f08 100644 --- a/docs/my-website/docs/routing.md +++ b/docs/my-website/docs/routing.md @@ -442,6 +442,8 @@ If a call fails after num_retries, fall back to another model group. If the error is a context window exceeded error, fall back to a larger model group (if given). +Fallbacks are done in-order - ["gpt-3.5-turbo", "gpt-4", "gpt-4-32k"], will do 'gpt-3.5-turbo' first, then 'gpt-4', etc. + ```python from litellm import Router diff --git a/docs/my-website/docs/tutorials/instructor.md b/docs/my-website/docs/tutorials/instructor.md new file mode 100644 index 000000000..906fdb4c6 --- /dev/null +++ b/docs/my-website/docs/tutorials/instructor.md @@ -0,0 +1,95 @@ +# Instructor - Function Calling + +Use LiteLLM Router with [jxnl's instructor library](https://github.com/jxnl/instructor) for function calling in prod. + +## Usage + +```python +import litellm +from litellm import Router +import instructor +from pydantic import BaseModel + +litellm.set_verbose = True # 👈 print DEBUG LOGS + +client = instructor.patch( + Router( + model_list=[ + { + "model_name": "gpt-3.5-turbo", # openai model name + "litellm_params": { # params for litellm completion/embedding call + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE"), + }, + } + ] + ) +) + + +class UserDetail(BaseModel): + name: str + age: int + + +user = client.chat.completions.create( + model="gpt-3.5-turbo", + response_model=UserDetail, + messages=[ + {"role": "user", "content": "Extract Jason is 25 years old"}, + ], +) + +assert isinstance(user, UserDetail) +assert user.name == "Jason" +assert user.age == 25 + +print(f"user: {user}") +``` + +## Async Calls + +```python +import litellm +from litellm import Router +import instructor, asyncio +from pydantic import BaseModel + +aclient = instructor.apatch( + Router( + 
model_list=[ + { + "model_name": "gpt-3.5-turbo", + "litellm_params": { + "model": "azure/chatgpt-v-2", + "api_key": os.getenv("AZURE_API_KEY"), + "api_version": os.getenv("AZURE_API_VERSION"), + "api_base": os.getenv("AZURE_API_BASE"), + }, + } + ], + default_litellm_params={"acompletion": True}, # 👈 IMPORTANT - tells litellm to route to async completion function. + ) +) + + +class UserExtract(BaseModel): + name: str + age: int + + +async def main(): + model = await aclient.chat.completions.create( + model="gpt-3.5-turbo", + response_model=UserExtract, + messages=[ + {"role": "user", "content": "Extract jason is 25 years old"}, + ], + ) + print(f"model: {model}") + + +asyncio.run(main()) +``` \ No newline at end of file diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js index 6d871b490..b11ed9d29 100644 --- a/docs/my-website/sidebars.js +++ b/docs/my-website/sidebars.js @@ -188,6 +188,7 @@ const sidebars = { label: 'Tutorials', items: [ 'tutorials/azure_openai', + 'tutorials/instructor', 'tutorials/oobabooga', "tutorials/gradio_integration", 'tutorials/huggingface_codellama', diff --git a/litellm/main.py b/litellm/main.py index 817dc5510..3e875815e 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -116,24 +116,54 @@ class LiteLLM: default_headers: Optional[Mapping[str, str]] = None, ): self.params = locals() - self.chat = Chat(self.params) + self.chat = Chat(self.params, router_obj=None) class Chat: - def __init__(self, params): + def __init__(self, params, router_obj: Optional[Any]): self.params = params - self.completions = Completions(self.params) + if self.params.get("acompletion", False) == True: + self.params.pop("acompletion") + self.completions: Union[AsyncCompletions, Completions] = AsyncCompletions( + self.params, router_obj=router_obj + ) + else: + self.completions = Completions(self.params, router_obj=router_obj) class Completions: - def __init__(self, params): + def __init__(self, params, router_obj: Optional[Any]): self.params 
= params + self.router_obj = router_obj def create(self, messages, model=None, **kwargs): for k, v in kwargs.items(): self.params[k] = v model = model or self.params.get("model") - response = completion(model=model, messages=messages, **self.params) + if self.router_obj is not None: + response = self.router_obj.completion( + model=model, messages=messages, **self.params + ) + else: + response = completion(model=model, messages=messages, **self.params) + return response + + +class AsyncCompletions: + def __init__(self, params, router_obj: Optional[Any]): + self.params = params + self.router_obj = router_obj + + async def create(self, messages, model=None, **kwargs): + for k, v in kwargs.items(): + self.params[k] = v + model = model or self.params.get("model") + if self.router_obj is not None: + response = await self.router_obj.acompletion( + model=model, messages=messages, **self.params + ) + else: + response = await acompletion(model=model, messages=messages, **self.params) return response diff --git a/litellm/router.py b/litellm/router.py index b39b67a09..7bcaf7faf 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -230,7 +230,7 @@ class Router: ) # dict to store aliases for router, ex. 
{"gpt-4": "gpt-3.5-turbo"}, all requests with gpt-4 -> get routed to gpt-3.5-turbo group # make Router.chat.completions.create compatible for openai.chat.completions.create - self.chat = litellm.Chat(params=default_litellm_params) + self.chat = litellm.Chat(params=default_litellm_params, router_obj=self) # default litellm args self.default_litellm_params = default_litellm_params diff --git a/litellm/tests/test_class.py b/litellm/tests/test_class.py index 3520d870d..7f1fc9065 100644 --- a/litellm/tests/test_class.py +++ b/litellm/tests/test_class.py @@ -4,6 +4,7 @@ # import sys, os # import traceback # import pytest + # sys.path.insert( # 0, os.path.abspath("../..") # ) # Adds the parent directory to the system path @@ -16,51 +17,68 @@ # from pydantic import BaseModel # # This enables response_model keyword -# # # from client.chat.completions.create -# # client = instructor.patch(Router(model_list=[{ -# # "model_name": "gpt-3.5-turbo", # openai model name -# # "litellm_params": { # params for litellm completion/embedding call -# # "model": "azure/chatgpt-v-2", -# # "api_key": os.getenv("AZURE_API_KEY"), -# # "api_version": os.getenv("AZURE_API_VERSION"), -# # "api_base": os.getenv("AZURE_API_BASE") -# # } -# # }])) +# # from client.chat.completions.create +# client = instructor.patch( +# Router( +# model_list=[ +# { +# "model_name": "gpt-3.5-turbo", # openai model name +# "litellm_params": { # params for litellm completion/embedding call +# "model": "azure/chatgpt-v-2", +# "api_key": os.getenv("AZURE_API_KEY"), +# "api_version": os.getenv("AZURE_API_VERSION"), +# "api_base": os.getenv("AZURE_API_BASE"), +# }, +# } +# ] +# ) +# ) -# # class UserDetail(BaseModel): -# # name: str -# # age: int -# # user = client.chat.completions.create( -# # model="gpt-3.5-turbo", -# # response_model=UserDetail, -# # messages=[ -# # {"role": "user", "content": "Extract Jason is 25 years old"}, -# # ] -# # ) -# # assert isinstance(model, UserExtract) +# class UserDetail(BaseModel): +# 
name: str +# age: int -# # assert isinstance(user, UserDetail) -# # assert user.name == "Jason" -# # assert user.age == 25 -# # print(f"user: {user}") -# import instructor -# from openai import AsyncOpenAI +# user = client.chat.completions.create( +# model="gpt-3.5-turbo", +# response_model=UserDetail, +# messages=[ +# {"role": "user", "content": "Extract Jason is 25 years old"}, +# ], +# ) + +# assert isinstance(user, UserDetail) +# assert user.name == "Jason" +# assert user.age == 25 + +# print(f"user: {user}") +# # import instructor +# # from openai import AsyncOpenAI + +# aclient = instructor.apatch( +# Router( +# model_list=[ +# { +# "model_name": "gpt-3.5-turbo", # openai model name +# "litellm_params": { # params for litellm completion/embedding call +# "model": "azure/chatgpt-v-2", +# "api_key": os.getenv("AZURE_API_KEY"), +# "api_version": os.getenv("AZURE_API_VERSION"), +# "api_base": os.getenv("AZURE_API_BASE"), +# }, +# } +# ], +# default_litellm_params={"acompletion": True}, +# ) +# ) -# aclient = instructor.apatch(Router(model_list=[{ -# "model_name": "gpt-3.5-turbo", # openai model name -# "litellm_params": { # params for litellm completion/embedding call -# "model": "azure/chatgpt-v-2", -# "api_key": os.getenv("AZURE_API_KEY"), -# "api_version": os.getenv("AZURE_API_VERSION"), -# "api_base": os.getenv("AZURE_API_BASE") -# } -# }], default_litellm_params={"acompletion": True})) # class UserExtract(BaseModel): # name: str # age: int + + # async def main(): # model = await aclient.chat.completions.create( # model="gpt-3.5-turbo", @@ -71,4 +89,5 @@ # ) # print(f"model: {model}") + # asyncio.run(main()) diff --git a/litellm/utils.py b/litellm/utils.py index 2179a34c7..c6bf9bcee 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -1465,6 +1465,7 @@ class Logging: self.langfuse_public_key != langFuseLogger.public_key and self.langfuse_secret != langFuseLogger.secret_key ): + print_verbose("Instantiates langfuse client") langFuseLogger = LangFuseLogger( 
langfuse_public_key=self.langfuse_public_key, langfuse_secret=self.langfuse_secret, diff --git a/poetry.lock b/poetry.lock index 95f6c5d0e..817a7e968 100644 --- a/poetry.lock +++ b/poetry.lock @@ -553,47 +553,56 @@ files = [ [[package]] name = "cryptography" -version = "41.0.3" +version = "42.0.5" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = true python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"}, - {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"}, - {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"}, - {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"}, - {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"}, - {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"}, - {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = 
"sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"}, - {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"}, - {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"}, - {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"}, - {file = "cryptography-41.0.3.tar.gz", hash = 
"sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"}, + {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16"}, + {file = "cryptography-42.0.5-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278"}, + {file = "cryptography-42.0.5-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d"}, + {file = "cryptography-42.0.5-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da"}, + {file = "cryptography-42.0.5-cp37-abi3-win32.whl", hash = "sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74"}, + {file = "cryptography-42.0.5-cp37-abi3-win_amd64.whl", hash = "sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940"}, + {file = "cryptography-42.0.5-cp39-abi3-macosx_10_12_universal2.whl", hash = 
"sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc"}, + {file = "cryptography-42.0.5-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc"}, + {file = "cryptography-42.0.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30"}, + {file = "cryptography-42.0.5-cp39-abi3-win32.whl", hash = "sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413"}, + {file = "cryptography-42.0.5-cp39-abi3-win_amd64.whl", hash = "sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2"}, + {file = 
"cryptography-42.0.5-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c"}, + {file = "cryptography-42.0.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac"}, + {file = "cryptography-42.0.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd"}, + {file = "cryptography-42.0.5.tar.gz", hash = "sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1"}, ] [package.dependencies] -cffi = ">=1.12" +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] nox = ["nox"] -pep8test = ["black", "check-sdist", "mypy", "ruff"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -1931,17 +1940,17 @@ cli = ["click (>=5.0)"] [[package]] name = "python-multipart" -version = "0.0.6" +version = "0.0.9" description = "A streaming 
multipart parser for Python" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"}, - {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"}, + {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, + {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, ] [package.extras] -dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"] +dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] [[package]] name = "pytz" @@ -2698,4 +2707,4 @@ proxy = ["PyJWT", "apscheduler", "backoff", "cryptography", "fastapi", "fastapi- [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0, !=3.9.7" -content-hash = "ac9ee16ff19c4126f36d7327447566c4681bc08b814f897e9ac3bb98f374071e" +content-hash = "d511820d41457e61cbad0bbda2f3c1210a49b2b440e794942d8e2c2c7895d3c0" diff --git a/pyproject.toml b/pyproject.toml index 66abc262e..c2167969b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.34.0" +version = "1.34.1" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -33,8 +33,8 @@ orjson = {version = "^3.9.7", optional = true} apscheduler = {version = 
"^3.10.4", optional = true} fastapi-sso = { version = "^0.10.0", optional = true } PyJWT = { version = "^2.8.0", optional = true } -python-multipart = { version = "^0.0.6", optional = true } -cryptography = { version = "41.0.3", optional = true } +python-multipart = { version = "^0.0.9", optional = true} +cryptography = {version = "^42.0.5", optional = true} prisma = {version = "0.11.0", optional = true} azure-identity = {version = "^1.15.0", optional = true} azure-keyvault-secrets = {version = "^4.8.0", optional = true} @@ -80,7 +80,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.34.0" +version = "1.34.1" version_files = [ "pyproject.toml:^version" ] diff --git a/requirements.txt b/requirements.txt index 10727e77a..cd63c1242 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,7 +23,7 @@ orjson==3.9.15 # fast /embedding responses apscheduler==3.10.4 # for resetting budget in background fastapi-sso==0.10.0 # admin UI, SSO pyjwt[crypto]==2.8.0 -python-multipart==0.0.6 # admin UI +python-multipart==0.0.9 # admin UI ### LITELLM PACKAGE DEPENDENCIES python-dotenv>=0.2.0 # for env tiktoken>=0.4.0 # for calculating usage