fix(factory.py): add replicate meta llama prompt templating support

This commit is contained in:
Krrish Dholakia 2024-04-25 08:24:28 -07:00
parent 92f21cba30
commit 4f46b4c397
4 changed files with 26 additions and 4 deletions

View file

@@ -307,9 +307,7 @@ def completion(
result, logs = handle_prediction_response(
prediction_url, api_key, print_verbose
)
model_response["ended"] = (
time.time()
) # for pricing this must remain right after calling api
## LOGGING
logging_obj.post_call(
input=prompt,