fix(sagemaker.py): fix meta llama model name for sagemaker custom deployment
This commit is contained in:
parent
3c60682eb4
commit
a38504ff1b
1 changed file with 1 addition and 1 deletion
sagemaker.py
@@ -120,7 +120,7 @@ def completion(
             )
         else:
             hf_model_name = model
-            if "jumpstart-dft-meta-textgeneration-llama" in model or "meta-textgenerationneuron-llama-2" in model: # llama2 model
+            if "meta-textgeneration-llama-2" in model or "meta-textgenerationneuron-llama-2" in model: # llama2 model
                 if model.endswith("-f") or "-f-" in model or "chat" in model: # sagemaker default for a chat model
                     hf_model_name = "meta-llama/Llama-2-7b-chat" # apply the prompt template for a llama2 chat model
                 else:
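
For context, the sketch below shows the matching logic as it reads after this change. The `resolve_hf_model_name` helper and the example endpoint names are hypothetical (in the real file this logic sits inline inside `completion`), and the base-model fallback in the `else` branch is assumed from context, since the diff hunk cuts off at `else:`.

# Minimal sketch of the post-fix matching logic; helper name and
# endpoint names are illustrative, not taken from the commit.

def resolve_hf_model_name(model: str) -> str:
    """Map a SageMaker endpoint name to the Hugging Face model id
    used to select the llama2 prompt template."""
    hf_model_name = model
    if "meta-textgeneration-llama-2" in model or "meta-textgenerationneuron-llama-2" in model:  # llama2 model
        if model.endswith("-f") or "-f-" in model or "chat" in model:  # sagemaker default for a chat model
            hf_model_name = "meta-llama/Llama-2-7b-chat"  # llama2 chat prompt template
        else:
            hf_model_name = "meta-llama/Llama-2-7b"  # assumed base-model fallback; diff context ends here
    return hf_model_name

# JumpStart default endpoint names still match, because they contain the
# model id after the "jumpstart-dft-" prefix:
print(resolve_hf_model_name("jumpstart-dft-meta-textgeneration-llama-2-7b-f"))
# -> meta-llama/Llama-2-7b-chat

# Custom deployments without the "jumpstart-dft-" prefix now match too,
# which the old '"jumpstart-dft-meta-textgeneration-llama" in model'
# check missed:
print(resolve_hf_model_name("meta-textgeneration-llama-2-13b-chat-custom"))
# -> meta-llama/Llama-2-7b-chat

The design point of the fix: matching on the model id substring "meta-textgeneration-llama-2" covers both the JumpStart default endpoint names (which embed that id after their "jumpstart-dft-" prefix) and custom endpoint names that include the model id, rather than only the default ones.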