diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py
index 3f14ac9e41..e0938ded8e 100644
--- a/litellm/llms/bedrock.py
+++ b/litellm/llms/bedrock.py
@@ -287,14 +287,14 @@ class AmazonMistralConfig:
     Reference: https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral.html
 
     Supported Params for the Amazon / Mistral models:
-    - `max_tokens` (integer) max tokens,
+    - `maxTokens` (integer) max tokens,
     - `temperature` (float) temperature for model,
-    - `top_p` (float) top p for model
+    - `topP` (float) top p for model
     - `stop` [string] A list of stop sequences that if generated by the model, stops the model from generating further output.
-    - `top_k` (float) top k for model
+    - `topK` (float) top k for model
     """
 
-    max_tokens: Optional[int] = None
+    maxTokens: Optional[int] = None
     temperature: Optional[float] = None
     topP: Optional[float] = None
     topK: Optional[float] = None
@@ -302,7 +302,7 @@ class AmazonMistralConfig:
 
     def __init__(
         self,
-        max_tokens: Optional[int] = None,
+        maxTokens: Optional[int] = None,
         temperature: Optional[float] = None,
         topP: Optional[int] = None,
         topK: Optional[float] = None,
@@ -1118,4 +1118,4 @@ def image_generation(
             image_dict = {"url": artifact["base64"]}
 
     model_response.data = image_dict
-    return model_response
+    return model_response
\ No newline at end of file