Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
fix(utils.py): fix default values
commit a241cdb818 (parent 8caf105427)
4 changed files with 13 additions and 7 deletions
@@ -952,7 +952,7 @@ def completion(
             original_response=json.dumps(response_body),
             additional_args={"complete_input_dict": data},
         )
-        print_verbose(f"raw model_response: {response}")
+        print_verbose(f"raw model_response: {response_body}")
         ## RESPONSE OBJECT
         outputText = "default"
         if provider == "ai21":
@@ -1065,6 +1065,7 @@ def completion(
             outputText = response_body.get("results")[0].get("outputText")

         response_metadata = response.get("ResponseMetadata", {})

         if response_metadata.get("HTTPStatusCode", 500) >= 400:
             raise BedrockError(
                 message=outputText,
@@ -1100,11 +1101,13 @@ def completion(
         prompt_tokens = response_metadata.get(
             "x-amzn-bedrock-input-token-count", len(encoding.encode(prompt))
         )
+        _text_response = model_response["choices"][0]["message"].get("content", "")
         completion_tokens = response_metadata.get(
             "x-amzn-bedrock-output-token-count",
             len(
                 encoding.encode(
-                    model_response["choices"][0]["message"].get("content", "")
+                    _text_response,
                     disallowed_special=(),
                 )
             ),
         )
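Note on the last hunk: it caches the generated text in _text_response and reuses it when the completion-token count has to be computed locally. Below is a minimal standalone sketch of that header-with-fallback usage counting. It is not code from the repo: tiktoken's cl100k_base encoder stands in for litellm's encoding object, and count_usage is a hypothetical helper name.

# Sketch of the token-usage fallback pattern shown in the hunk above.
# Assumptions: tiktoken's cl100k_base encoder stands in for litellm's
# `encoding`; count_usage is a hypothetical helper, not a repo function.
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")

def count_usage(response_metadata: dict, prompt: str, completion_text: str) -> dict:
    # Prefer the token counts Bedrock reports in its response metadata;
    # fall back to counting locally when a key is missing.
    prompt_tokens = response_metadata.get(
        "x-amzn-bedrock-input-token-count", len(encoding.encode(prompt))
    )
    completion_tokens = response_metadata.get(
        "x-amzn-bedrock-output-token-count",
        # disallowed_special=() encodes special-token text in the model output
        # as ordinary text instead of raising a ValueError.
        len(encoding.encode(completion_text, disallowed_special=())),
    )
    return {
        "prompt_tokens": prompt_tokens,
        "completion_tokens": completion_tokens,
        "total_tokens": prompt_tokens + completion_tokens,
    }

If the x-amzn-bedrock-* keys are absent, both counts degrade to local tiktoken counts, which is the default-value behavior the commit message refers to.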