Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00
fix(sagemaker.py): enable passing hf model name for prompt template
commit f9b74e54a3
parent 20dab6f636
3 changed files with 8 additions and 7 deletions
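Only the test-file hunks are shown below; the change lets callers pass the base Hugging Face model name alongside a SageMaker endpoint so litellm can apply the matching prompt template. A minimal usage sketch, assuming a hypothetical endpoint name (`my-llama2-endpoint` is a placeholder, not from this commit):

import litellm

# Sketch of the new parameter in use. The endpoint name is a
# hypothetical placeholder; hf_model_name tells litellm which Hugging
# Face model's prompt template to apply when formatting the messages.
response = litellm.completion(
    model="sagemaker/my-llama2-endpoint",  # placeholder SageMaker endpoint
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
    temperature=0.2,
    max_tokens=80,
    hf_model_name="meta-llama/Llama-2-7b",  # same value as in the test below
)
print(response)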
@@ -1039,6 +1039,7 @@ def test_completion_sagemaker():
             messages=messages,
             temperature=0.2,
             max_tokens=80,
+            hf_model_name="meta-llama/Llama-2-7b",
         )
         # Add any assertions here to check the response
         print(response)
@@ -1056,6 +1057,7 @@ def test_completion_chat_sagemaker():
             messages=messages,
             max_tokens=100,
             stream=True,
+            hf_model_name="meta-llama/Llama-2-7b-chat-hf",
         )
         # Add any assertions here to check the response
         complete_response = ""
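The streaming test above accumulates chunks into complete_response. A sketch of what that consuming loop typically looks like, assuming the chunks follow the OpenAI delta format that litellm targets (the None check is defensive and not part of this commit):

# Sketch of draining the stream returned when stream=True; each chunk
# carries incremental text in choices[0].delta.content, which can be
# None on the final chunk.
complete_response = ""
for chunk in response:
    delta = chunk.choices[0].delta.content
    if delta:
        complete_response += delta
print(complete_response)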