diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py
index c580cf4da..ec938f33e 100644
--- a/litellm/llms/prompt_templates/factory.py
+++ b/litellm/llms/prompt_templates/factory.py
@@ -350,7 +350,7 @@ def prompt_factory(model: str, messages: list, custom_llm_provider: Optional[str
         elif "mosaicml/mpt" in model:
             if "chat" in model:
                 return mpt_chat_pt(messages=messages)
-        elif "codellama/codellama" in model:
+        elif "codellama/codellama" in model or "togethercomputer/codellama" in model:
             if "instruct" in model:
                 return llama_2_chat_pt(messages=messages) # https://huggingface.co/blog/codellama#conversational-instructions
         elif "wizardlm/wizardcoder" in model:
diff --git a/litellm/tests/test_together_ai.py b/litellm/tests/test_together_ai.py
new file mode 100644
index 000000000..ac7ea6771
--- /dev/null
+++ b/litellm/tests/test_together_ai.py
@@ -0,0 +1,64 @@
+import sys, os
+import traceback
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+) # Adds the parent directory to the system path
+import time
+import litellm
+import openai
+import pytest
+
+### Together AI
+import together
+together.api_key = os.getenv("TOGETHERAI_API_KEY")  # read from the environment; never commit keys
+
+sample_message = [
+    {"role": "user", "content": "Who are you"},
+    {"role": "assistant", "content": "I am your helpful assistant."},
+    {"role": "user", "content": "Tell me a joke"},
+]
+
+
+def format_prompt_togetherai(messages, prompt_format, stop_words):
+    start_token, end_token = prompt_format.split('{prompt}')
+    prompt = ''
+    for message in messages:
+        role = message['role']
+        message_content = message['content']
+        if role == 'system':
+            prompt += f"{start_token}\n<<SYS>>\n{message_content}\n<</SYS>>\n"
+        elif role == 'user':
+            prompt += f"{start_token}{message_content}{end_token}"
+        else:
+            prompt += f'{message_content}{stop_words[0]}'
+    return prompt
+
+
+model = 'togethercomputer/CodeLlama-13b-Instruct'
+stop_words = list(together.Models.info(model)['config']['stop'])
+prompt_format = str(together.Models.info(model)['config']['prompt_format'])
+formatted_prompt = format_prompt_togetherai(
+    messages=sample_message, prompt_format=prompt_format, stop_words=stop_words)
+for token in together.Complete.create_streaming(prompt=formatted_prompt,
+                                                model=model, stop=stop_words, max_tokens=512):
+    print(token, end="")
+
+
+### litellm
+
+import os
+from litellm import completion
+
+# TOGETHERAI_API_KEY must already be set in the environment; never commit keys
+
+sample_message = [
+    {"role": "user", "content": "Who are you"},
+    {"role": "assistant", "content": "I am your helpful assistant."},
+    {"role": "user", "content": "Tell me a joke"},
+]
+
+res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
+                 messages=sample_message, stream=False, max_tokens=1000)
+
+print(list(res))
\ No newline at end of file
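
Why the one-line change in factory.py matters: litellm lower-cases the model name before matching, so Together AI's `togethercomputer/CodeLlama-13b-Instruct` never matched the old `"codellama/codellama"` check and fell through to a generic template. A minimal sketch of the matching after the fix; the provider-prefix stripping and lower-casing are assumptions based on how litellm's `get_llm_provider` and `prompt_factory` behave, not the diff itself:

# Hypothetical illustration of the routing after the fix; not litellm source.
model = "together_ai/togethercomputer/CodeLlama-13b-Instruct"
model = model.split("/", 1)[1].lower()  # provider prefix stripped, then lower-cased
# -> "togethercomputer/codellama-13b-instruct"
assert "codellama/codellama" not in model      # old check alone: no match
assert "togethercomputer/codellama" in model   # new check: matches
assert "instruct" in model                     # so llama_2_chat_pt() is selected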
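
For reference, a standalone sketch of the prompt that `format_prompt_togetherai` assembles for the sample messages, runnable with no network access. The `prompt_format` and `stop_words` values below are assumed examples of a CodeLlama-Instruct config; the test itself fetches the real values from `together.Models.info(model)['config']`:

def format_prompt_togetherai(messages, prompt_format, stop_words):
    # Same logic as the test helper above.
    start_token, end_token = prompt_format.split("{prompt}")
    prompt = ""
    for message in messages:
        if message["role"] == "system":
            prompt += f"{start_token}\n<<SYS>>\n{message['content']}\n<</SYS>>\n"
        elif message["role"] == "user":
            prompt += f"{start_token}{message['content']}{end_token}"
        else:  # assistant turns are terminated with the first stop word
            prompt += f"{message['content']}{stop_words[0]}"
    return prompt

# Assumed CodeLlama-Instruct config values (real ones come from the Together API):
prompt_format = "[INST]\n{prompt}\n[/INST]\n\n"
stop_words = ["</s>", "[INST]"]

messages = [
    {"role": "user", "content": "Who are you"},
    {"role": "assistant", "content": "I am your helpful assistant."},
    {"role": "user", "content": "Tell me a joke"},
]
print(repr(format_prompt_togetherai(messages, prompt_format, stop_words)))
# -> '[INST]\nWho are you\n[/INST]\n\nI am your helpful assistant.</s>[INST]\nTell me a joke\n[/INST]\n\n'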