diff --git a/docs/my-website/docs/providers/huggingface.md b/docs/my-website/docs/providers/huggingface.md
index 9dda860a53..e232cb5867 100644
--- a/docs/my-website/docs/providers/huggingface.md
+++ b/docs/my-website/docs/providers/huggingface.md
@@ -42,24 +42,21 @@ def default_pt(messages):
```python
# Create your own custom prompt template
litellm.register_prompt_template(
- model="togethercomputer/LLaMA-2-7B-32K",
- role_dict={
+ model="togethercomputer/LLaMA-2-7B-32K",
+ roles={
"system": {
"pre_message": "[INST] <>\n",
"post_message": "\n<>\n [/INST]\n"
},
- "user": {
+ "user": {
"pre_message": "[INST] ",
"post_message": " [/INST]\n"
},
"assistant": {
- "pre_message": "\n",
- "post_message": "\n",
+ "post_message": "\n"
}
- } # tell LiteLLM how you want to map the openai messages to this model
- pre_message_sep= "\n",
- post_message_sep= "\n"
-)
+ }
+)
def test_huggingface_custom_model():
model = "huggingface/togethercomputer/LLaMA-2-7B-32K"
diff --git a/docs/my-website/docs/providers/togetherai.md b/docs/my-website/docs/providers/togetherai.md
index 8ceb196bee..dd1b37989a 100644
--- a/docs/my-website/docs/providers/togetherai.md
+++ b/docs/my-website/docs/providers/togetherai.md
@@ -75,11 +75,22 @@ Let's register our custom prompt template: [Implementation Code](https://github.
import litellm
litellm.register_prompt_template(
- model="OpenAssistant/llama2-70b-oasst-sft-v10",
- roles={"system":"<|im_start|>system", "assistant":"<|im_start|>assistant", "user":"<|im_start|>user"}, # tell LiteLLM how you want to map the openai messages to this model
- pre_message_sep= "\n",
- post_message_sep= "\n"
-)
+ model="OpenAssistant/llama2-70b-oasst-sft-v10",
+ roles={
+ "system": {
+ "pre_message": "[<|im_start|>system",
+ "post_message": "\n"
+ },
+ "user": {
+ "pre_message": "<|im_start|>user",
+ "post_message": "\n"
+ },
+ "assistant": {
+ "pre_message": "<|im_start|>assistant",
+ "post_message": "\n"
+ }
+ }
+)
```
Let's use it!
@@ -105,11 +116,22 @@ from litellm import completion
os.environ["TOGETHERAI_API_KEY"] = ""
litellm.register_prompt_template(
- model="OpenAssistant/llama2-70b-oasst-sft-v10",
- roles={"system":"<|im_start|>system", "assistant":"<|im_start|>assistant", "user":"<|im_start|>user"}, # tell LiteLLM how you want to map the openai messages to this model
- pre_message_sep= "\n",
- post_message_sep= "\n"
-)
+ model="OpenAssistant/llama2-70b-oasst-sft-v10",
+ roles={
+ "system": {
+ "pre_message": "[<|im_start|>system",
+ "post_message": "\n"
+ },
+ "user": {
+ "pre_message": "<|im_start|>user",
+ "post_message": "\n"
+ },
+ "assistant": {
+ "pre_message": "<|im_start|>assistant",
+ "post_message": "\n"
+ }
+ }
+)
messages=[{"role":"user", "content": "Write me a poem about the blue sky"}]
diff --git a/docs/my-website/docs/providers/vllm.md b/docs/my-website/docs/providers/vllm.md
index d97f09eaef..5a916217ea 100644
--- a/docs/my-website/docs/providers/vllm.md
+++ b/docs/my-website/docs/providers/vllm.md
@@ -95,7 +95,7 @@ def default_pt(messages):
# Create your own custom prompt template
litellm.register_prompt_template(
model="togethercomputer/LLaMA-2-7B-32K",
- role_dict={
+ roles={
"system": {
"pre_message": "[INST] <>\n",
"post_message": "\n<>\n [/INST]\n"
diff --git a/litellm/utils.py b/litellm/utils.py
index 95b0502e59..e28e9e29aa 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1538,7 +1538,7 @@ def register_prompt_template(model: str, roles: dict, initial_prompt_value: str
"assistant": {
"post_message": "\n" # follows this - https://replicate.com/blog/how-to-prompt-llama
}
- },
+ }
)
```
"""