diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index 4056e43e9..205bf56fa 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -36,6 +36,18 @@ def falcon_instruct_pt(messages):
             prompt += message['role']+":"+ message["content"].replace("\r\n", "\n").replace("\n\n", "\n")
             prompt += "\n\n"
 
 
+# Falcon-180B chat prompt template - "System:" / "User:" / "Falcon:" turns
+def falcon_chat_pt(messages):
+    prompt = ""
+    for message in messages:
+        if message["role"] == "system":
+            prompt += "System: " + message["content"]
+        elif message["role"] == "assistant":
+            prompt += "Falcon: " + message["content"]
+        elif message["role"] == "user":
+            prompt += "User: " + message["content"]
+    return prompt
+
 # MPT prompt template - from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py#L110
 def mpt_chat_pt(messages):
@@ -93,7 +105,9 @@ def prompt_factory(model: str, messages: list):
         else:
             return default_pt(messages=messages)
     elif "tiiuae/falcon" in model: # Note: for the instruct models, it's best to use a User: .., Assistant:.. approach in your prompt template.
-        if "instruct" in model:
+        if model == "tiiuae/falcon-180B-chat":
+            return falcon_chat_pt(messages=messages)
+        elif "instruct" in model:
             return falcon_instruct_pt(messages=messages)
         else:
             return default_pt(messages=messages)
diff --git a/pyproject.toml b/pyproject.toml
index e038a9390..fb8f22707 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.538"
+version = "0.1.539"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
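
For reference, a small usage sketch (not part of the patch) of the code path this diff enables. The message list, the call, and the printed output are illustrative assumptions; it presumes the patched factory module with falcon_chat_pt returning the assembled prompt, and that the model string reaches the falcon branch unchanged.

# Illustrative only - exercises the new falcon-180B-chat branch added above.
from litellm.llms.prompt_templates.factory import prompt_factory

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]

# An exact model-name match routes to falcon_chat_pt; "instruct" models still
# go through falcon_instruct_pt, and everything else falls back to default_pt.
prompt = prompt_factory(model="tiiuae/falcon-180B-chat", messages=messages)
print(prompt)
# Expected output (roughly):
# System: You are a helpful assistant.User: What is the capital of France?

As the patch stands, the routing is an exact, case-sensitive match on "tiiuae/falcon-180B-chat", and falcon_chat_pt concatenates turns without separators between them.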