Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00
fix(sagemaker.py): bring back llama2 templating for sagemaker
This commit is contained in:
parent f9b74e54a3
commit fc07598b21

1 changed file with 6 additions and 0 deletions
sagemaker.py
@@ -120,6 +120,12 @@ def completion(
             messages=messages
         )
     else:
+        if hf_model_name is None:
+            if "llama2" in model.lower(): # llama2 model
+                if "chat" in model.lower():
+                    hf_model_name = "meta-llama/Llama-2-7b-chat-hf"
+                else:
+                    hf_model_name = "meta-llama/Llama-2-7b"
         hf_model_name = hf_model_name or model # pass in hf model name for pulling its prompt template - (e.g. `hf_model_name="meta-llama/Llama-2-7b-chat-hf"` applies the llama2 chat template to the prompt)
         prompt = prompt_factory(model=hf_model_name, messages=messages)
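For reference, a minimal standalone sketch of the fallback this commit restores. The helper function `resolve_hf_model_name` and the endpoint names below are illustrative, not litellm's API; only the string checks and the `or model` fallback mirror the diff:

# Sketch of the name-based fallback restored above: if no hf_model_name is
# given, guess a HF model from the SageMaker endpoint name so the right
# llama2 prompt template gets applied.
def resolve_hf_model_name(model: str, hf_model_name: str | None = None) -> str:
    if hf_model_name is None:
        if "llama2" in model.lower():  # endpoint name hints at a llama2 model
            if "chat" in model.lower():  # chat variant -> llama2 chat template
                hf_model_name = "meta-llama/Llama-2-7b-chat-hf"
            else:
                hf_model_name = "meta-llama/Llama-2-7b"
    return hf_model_name or model  # otherwise fall back to the raw endpoint name

# Hypothetical endpoint names, for illustration only:
print(resolve_hf_model_name("jumpstart-llama2-7b-chat"))  # meta-llama/Llama-2-7b-chat-hf
print(resolve_hf_model_name("jumpstart-llama2-7b"))       # meta-llama/Llama-2-7b
print(resolve_hf_model_name("my-custom-endpoint"))        # my-custom-endpoint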