fix(factory.py): support togethercomputer codellama pt

commit fff0228c20
parent d4c6cc3920

2 changed files with 65 additions and 1 deletion
factory.py

@@ -350,7 +350,7 @@ def prompt_factory(model: str, messages: list, custom_llm_provider: Optional[str
     elif "mosaicml/mpt" in model:
         if "chat" in model:
             return mpt_chat_pt(messages=messages)
-    elif "codellama/codellama" in model:
+    elif "codellama/codellama" in model or "togethercomputer/codellama" in model:
         if "instruct" in model:
             return llama_2_chat_pt(messages=messages) # https://huggingface.co/blog/codellama#conversational-instructions
     elif "wizardlm/wizardcoder" in model:
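In effect, the new branch routes Together AI's hosted CodeLlama instruct models through the same Llama-2 chat template as Meta's own checkpoints. A minimal sketch of the dispatch, assuming the [INST]-style template described in the linked Hugging Face post (litellm's real llama_2_chat_pt may differ in detail) and assuming the factory lowercases the model name before these substring checks:

# Sketch only; template shape assumed from
# https://huggingface.co/blog/codellama#conversational-instructions
def llama_2_chat_pt_sketch(messages):
    prompt = ""
    for m in messages:
        if m["role"] == "system":
            prompt += f"[INST] <<SYS>>\n{m['content']}\n<</SYS>>\n\n"
        elif m["role"] == "user":
            prompt += f"[INST] {m['content']} [/INST]"
        else:  # assistant turns are appended verbatim between instruction blocks
            prompt += f" {m['content']} "
    return prompt

# Substring matching only succeeds on a lowercased name (assumed to happen upstream).
model = "togethercomputer/CodeLlama-13b-Instruct".lower()
if "codellama/codellama" in model or "togethercomputer/codellama" in model:
    if "instruct" in model:
        print(llama_2_chat_pt_sketch([{"role": "user", "content": "Tell me a joke"}]))
        # -> [INST] Tell me a joke [/INST]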
litellm/tests/test_together_ai.py (new file, 64 lines)

@@ -0,0 +1,64 @@
import sys, os
import traceback

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import time
import litellm
import openai
import pytest

### Together AI
import together
together.api_key = "60c79880fc49df126d3e87b53f8a463ff6e1c6d27fe64207cde25cdfcd1f2f36"

sample_message = [
    {"role": "user", "content": "Who are you"},
    {"role": "assistant", "content": "I am your helpful assistant."},
    {"role": "user", "content": "Tell me a joke"},
]


def format_prompt_togetherai(messages, prompt_format, stop_words):
    start_token, end_token = prompt_format.split('{prompt}')
    prompt = ''
    for message in messages:
        role = message['role']
        message_content = message['content']
        if role == 'system':
            prompt += f"{start_token}\n<<SYS>>\n{message_content}\n<</SYS>>\n"
        elif role == 'user':
            prompt += f"{start_token}{message_content}{end_token}"
        else:
            prompt += f'{message_content}{stop_words[0]}'
    return prompt


model = 'togethercomputer/CodeLlama-13b-Instruct'
stop_words = list(together.Models.info(model)['config']['stop'])
prompt_format = str(together.Models.info(model)['config']['prompt_format'])
formatted_prompt = format_prompt_togetherai(
    messages=sample_message, prompt_format=prompt_format, stop_words=stop_words)
for token in together.Complete.create_streaming(prompt=formatted_prompt,
                                                model=model, stop=stop_words, max_tokens=512):
    print(token, end="")


### litellm

import os
from litellm import completion

os.environ["TOGETHERAI_API_KEY"] = "60c79880fc49df126d3e87b53f8a463ff6e1c6d27fe64207cde25cdfcd1f2f36"

sample_message = [
    {"role": "user", "content": "Who are you"},
    {"role": "assistant", "content": "I am your helpful assistant."},
    {"role": "user", "content": "Tell me a joke"},
]

res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
                 messages=sample_message, stream=False, max_tokens=1000)

print(list(res))
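For reference, format_prompt_togetherai above just splits the provider-reported prompt_format around the {prompt} placeholder and wraps each turn. A quick illustration reusing that helper with hypothetical config values (the real prompt_format and stop list come from together.Models.info at runtime and are not shown in this diff):

# Hypothetical stand-ins for together.Models.info(model)['config'] values.
fmt = "[INST] {prompt} [/INST]"
stops = ["</s>"]

print(format_prompt_togetherai(
    messages=sample_message, prompt_format=fmt, stop_words=stops))
# -> [INST] Who are you [/INST]I am your helpful assistant.</s>[INST] Tell me a joke [/INST]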
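The litellm call at the end of the test is non-streaming (stream=False followed by print(list(res))). A streaming variant mirroring the together.Complete.create_streaming loop might look like the sketch below; the OpenAI-style delta chunks are an assumption about litellm's streaming output, not something this commit shows:

# Sketch: stream tokens through litellm instead of the raw together client.
import os
from litellm import completion

os.environ["TOGETHERAI_API_KEY"] = "your-together-ai-key"  # placeholder

res = completion(model="together_ai/togethercomputer/CodeLlama-13b-Instruct",
                 messages=[{"role": "user", "content": "Tell me a joke"}],
                 stream=True, max_tokens=512)
for chunk in res:
    # Assumed OpenAI-style chunk shape: choices[0].delta.content
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", "") or "", end="")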