fix hf routing bug

This commit is contained in:
Krrish Dholakia 2023-09-28 13:32:39 -07:00
parent 91f906d7c5
commit 46044b70a8
3 changed files with 3 additions and 6 deletions

View file

@@ -239,6 +239,7 @@ def completion(
model
] # update the model to the actual value if an alias has been passed in
model_response = ModelResponse()
if deployment_id != None: # azure llms
model=deployment_id
custom_llm_provider="azure"
@@ -706,11 +707,7 @@ def completion(
original_response=response,
additional_args={"headers": litellm.headers},
)
elif (
(
model in litellm.huggingface_models and
custom_llm_provider!="custom" # if users use a hf model, with a custom/provider. See implementation of custom_llm_provider == custom
) or
custom_llm_provider == "huggingface"
):
custom_llm_provider = "huggingface"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "0.1.794" version = "0.1.795"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"