Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00
fix(factory.py): add replicate meta llama prompt templating support
This commit is contained in: parent a26ecbad97, commit cf24e3eb02
4 changed files with 26 additions and 4 deletions
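The factory.py change maps OpenAI-style chat messages into the Llama 3 instruct prompt format before the request is sent to Replicate. As an illustration only (this follows Meta's documented Llama 3 special tokens and is not a copy of litellm's actual factory.py code), the templating works roughly like this:

# Illustrative sketch: build a Llama 3 instruct prompt from chat messages
# using Meta's documented special tokens. Not litellm's actual factory.py code.
def llama3_prompt(messages):
    prompt = "<|begin_of_text|>"
    for m in messages:
        prompt += (
            f"<|start_header_id|>{m['role']}<|end_header_id|>\n\n"
            f"{m['content']}<|eot_id|>"
        )
    # End with an open assistant header so the model generates the reply.
    prompt += "<|start_header_id|>assistant<|end_header_id|>\n\n"
    return prompt

print(llama3_prompt([{"role": "user", "content": "Hey, how's it going?"}]))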
@@ -1767,6 +1767,25 @@ def test_completion_azure_deployment_id():
 # test_completion_anthropic_openai_proxy()
 
 
+def test_completion_replicate_llama3():
+    litellm.set_verbose = True
+    model_name = "replicate/meta/meta-llama-3-8b-instruct"
+    try:
+        response = completion(
+            model=model_name,
+            messages=messages,
+        )
+        print(response)
+        # Add any assertions here to check the response
+        response_str = response["choices"][0]["message"]["content"]
+        print("RESPONSE STRING\n", response_str)
+        if type(response_str) != str:
+            pytest.fail(f"Error occurred: {e}")
+        raise Exception("it worked!")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
 @pytest.mark.skip(reason="replicate endpoints take +2 mins just for this request")
 def test_completion_replicate_vicuna():
     print("TESTING REPLICATE")
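The committed test relies on the module-level `messages` list in the test file, references `e` inside the `if` branch before it is ever bound, and ends the `try` block with a debug `raise Exception("it worked!")` that the `except` then turns into a failure. A self-contained sketch of the intended check, assuming litellm is installed and REPLICATE_API_TOKEN is set in the environment (the test name and inline `messages` list here are illustrative, not part of the commit):

import litellm
import pytest
from litellm import completion


def test_completion_replicate_llama3_sketch():
    litellm.set_verbose = True
    model_name = "replicate/meta/meta-llama-3-8b-instruct"
    messages = [{"role": "user", "content": "Hey, how's it going?"}]
    try:
        response = completion(
            model=model_name,
            messages=messages,
        )
        print(response)
        response_str = response["choices"][0]["message"]["content"]
        print("RESPONSE STRING\n", response_str)
        # The completion content should always come back as a plain string.
        if not isinstance(response_str, str):
            pytest.fail("Response content is not a string")
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")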