huggingface/mistralai/Mistral-7B-Instruct-v0.3

Ishaan Jaff 2025-01-13 18:42:36 -08:00
parent 3fe1f3b3b2
commit 970e9c7507
2 changed files with 3 additions and 3 deletions


@@ -102,7 +102,7 @@ async def test_huggingface_text_completion_logprobs():
     client = AsyncHTTPHandler()
     with patch.object(client, "post", return_value=return_val) as mock_post:
         response = await litellm.atext_completion(
-            model="huggingface/mistralai/Mistral-7B-v0.1",
+            model="huggingface/mistralai/Mistral-7B-Instruct-v0.3",
             prompt="good morning",
             client=client,
         )
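A minimal sketch of the mock pattern this hunk relies on, using the same patch.object / AsyncHTTPHandler / atext_completion calls visible above; the JSON payload here is a hypothetical stand-in for the fuller per-token logprobs response the real test constructs:

    import asyncio
    from unittest.mock import AsyncMock, patch

    import litellm
    from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler


    async def main():
        # Hypothetical minimal Hugging Face text-generation payload; the
        # real test builds a fuller response with per-token logprob details.
        return_val = AsyncMock()
        return_val.json.return_value = [{"generated_text": "good morning to you"}]
        return_val.status_code = 200

        client = AsyncHTTPHandler()
        with patch.object(client, "post", return_value=return_val) as mock_post:
            response = await litellm.atext_completion(
                model="huggingface/mistralai/Mistral-7B-Instruct-v0.3",
                prompt="good morning",
                client=client,
            )
        # The patched client answered, so no request reached Hugging Face.
        mock_post.assert_called_once()
        print(response.choices[0].text)


    asyncio.run(main())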


@@ -3940,7 +3940,7 @@ def test_completion_hf_prompt_array():
     litellm.set_verbose = True
     print("\n testing hf mistral\n")
     response = text_completion(
-        model="huggingface/mistralai/Mistral-7B-v0.1",
+        model="huggingface/mistralai/Mistral-7B-Instruct-v0.3",
         prompt=token_prompt,  # token prompt is a 2d list,
         max_tokens=0,
         temperature=0.0,
@@ -3971,7 +3971,7 @@ def test_text_completion_stream():
     try:
         for _ in range(2):  # check if closed client used
             response = text_completion(
-                model="huggingface/mistralai/Mistral-7B-v0.1",
+                model="huggingface/mistralai/Mistral-7B-Instruct-v0.3",
                 prompt="good morning",
                 stream=True,
                 max_tokens=10,
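
For reference, a minimal sketch of exercising the new model id directly, without mocks, mirroring the streaming call in test_text_completion_stream; it assumes litellm reads a Hugging Face token from HUGGINGFACE_API_KEY, and the token value shown is a placeholder:

    import os

    import litellm

    os.environ["HUGGINGFACE_API_KEY"] = "hf_..."  # placeholder token

    # Streaming text completion against the updated model id.
    response = litellm.text_completion(
        model="huggingface/mistralai/Mistral-7B-Instruct-v0.3",
        prompt="good morning",
        stream=True,
        max_tokens=10,
    )
    for chunk in response:
        print(chunk)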