Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 19:24:27 +00:00)
check if json has error
parent 8c0f3ab964
commit 27c9cd0256
1 changed file with 5 additions and 1 deletion
@@ -320,7 +320,11 @@ def completion(
         logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": response.text}, logger_fn=logger_fn)
         if isinstance(response, dict) and "error" in response:
             raise Exception(response["error"])
-        completion_response = response.json()[0]['generated_text']
+        json_response = response.json()
+        if 'error' in json_response: # raise HF errors when they exist
+            raise Exception(json_response['error'])
+
+        completion_response = json_response[0]['generated_text']
         prompt_tokens = len(encoding.encode(prompt))
         completion_tokens = len(encoding.encode(completion_response))
         ## RESPONSE OBJECT
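For context, the pattern this commit adds can be sketched in isolation. The snippet below is a minimal sketch, assuming a Hugging Face Inference API text-generation endpoint that returns either a JSON object like {"error": "..."} or a list like [{"generated_text": "..."}]. The URL, token, and prompt are placeholders, not values from this commit; only the error check mirrors the diff above.

```python
# Minimal sketch of the error check added in this commit (hypothetical endpoint/token).
import requests

API_URL = "https://api-inference.huggingface.co/models/<model-id>"  # placeholder model id
headers = {"Authorization": "Bearer <HF_TOKEN>"}                     # placeholder token

response = requests.post(API_URL, headers=headers, json={"inputs": "Say hello"})
json_response = response.json()

# Raise HF errors when they exist, rather than failing later with an opaque
# IndexError/KeyError on json_response[0]['generated_text']. When the call
# succeeds, json_response is a list of dicts, so the membership test is False
# and execution falls through to the normal parsing path.
if "error" in json_response:
    raise Exception(json_response["error"])

completion_response = json_response[0]["generated_text"]
print(completion_response)
```

Raising as soon as the 'error' key is present surfaces the API's own error message instead of the IndexError or KeyError that indexing the response would otherwise produce.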