fix(factory.py): add replicate meta llama prompt templating support

This commit is contained in:
Krrish Dholakia 2024-04-25 08:24:28 -07:00
parent a26ecbad97
commit cf24e3eb02
4 changed files with 26 additions and 4 deletions

View file

@@ -1767,6 +1767,25 @@ def test_completion_azure_deployment_id():
# test_completion_anthropic_openai_proxy()
def test_completion_replicate_llama3():
    """Smoke-test completion() against Replicate's Meta Llama 3 8B Instruct model.

    Sends the module-level `messages` to the replicate provider and asserts
    that the returned message content is a string. Fails the test on any
    exception from the call or on a non-string response payload.
    """
    litellm.set_verbose = True
    model_name = "replicate/meta/meta-llama-3-8b-instruct"
    try:
        response = completion(
            model=model_name,
            messages=messages,
        )
        print(response)
        # The provider must return the assistant reply as plain string content.
        response_str = response["choices"][0]["message"]["content"]
        print("RESPONSE STRING\n", response_str)
        # Bug fix: original referenced undefined `e` here and then raised
        # Exception("it worked!") on success, which the except clause turned
        # into a test failure — the test could never pass.
        if not isinstance(response_str, str):
            pytest.fail(f"Expected string content, got {type(response_str)}")
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
@pytest.mark.skip(reason="replicate endpoints take +2 mins just for this request")
def test_completion_replicate_vicuna():
print("TESTING REPLICATE")