forked from phoenix/litellm-mirror
fix(huggingface_restapi.py): raise better exceptions for unprocessable hf responses
This commit is contained in:
parent 1ff8f75752
commit 71e64c34cb
1 changed file with 5 additions and 0 deletions
@@ -170,6 +170,11 @@ class Huggingface(BaseLLM):
                         "content"
                     ] = completion_response["generated_text"]  # type: ignore
             elif task == "text-generation-inference":
+                if (not isinstance(completion_response, list)
+                    or not isinstance(completion_response[0], dict)
+                    or "generated_text" not in completion_response[0]):
+                    raise HuggingfaceError(status_code=422, message=f"response is not in expected format - {completion_response}")
+
                 if len(completion_response[0]["generated_text"]) > 0:
                     model_response["choices"][0]["message"][
                         "content"
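The guard added above validates the shape of a text-generation-inference response before indexing into it, so a malformed payload surfaces as a 422 HuggingfaceError rather than an opaque KeyError or TypeError. Below is a minimal standalone sketch of the same check; HuggingfaceError is stubbed out here (in litellm it is defined in huggingface_restapi.py) and the sample payloads are hypothetical.

# Standalone sketch of the response-shape guard added in this commit.
# HuggingfaceError is a stub; the sample payloads below are made up.

class HuggingfaceError(Exception):
    def __init__(self, status_code, message):
        self.status_code = status_code
        self.message = message
        super().__init__(message)

def validate_tgi_response(completion_response):
    # A well-formed text-generation-inference response is a list whose
    # first element is a dict containing "generated_text".
    if (not isinstance(completion_response, list)
        or not isinstance(completion_response[0], dict)
        or "generated_text" not in completion_response[0]):
        raise HuggingfaceError(
            status_code=422,
            message=f"response is not in expected format - {completion_response}",
        )

validate_tgi_response([{"generated_text": "hello"}])      # well-formed: no error
try:
    validate_tgi_response({"error": "model overloaded"})  # wrong shape: dict, not list
except HuggingfaceError as e:
    print(e.status_code, e.message)

Embedding the raw payload in the 422 message makes the offending response visible to the caller instead of failing later with an unrelated traceback.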