forked from phoenix/litellm-mirror
fix(factory.py): add replicate meta llama prompt templating support
parent 92f21cba30
commit 4f46b4c397

4 changed files with 26 additions and 4 deletions
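The commit subject refers to prompt templating for Meta's Llama chat models served via Replicate: a list of chat messages must be flattened into the `[INST]`/`<<SYS>>` string format the model was fine-tuned on before it is sent as a single prompt. As a minimal sketch of that idea (the helper name `llama_2_chat_prompt` and the exact whitespace handling are assumptions for illustration, not this repository's factory.py code):

```python
# Illustrative sketch of Llama-2 chat prompt templating; the helper name
# and exact whitespace handling are assumptions, not this repo's factory.py.
def llama_2_chat_prompt(messages: list[dict]) -> str:
    prompt = ""
    system_block = ""
    for msg in messages:
        role, content = msg["role"], msg["content"]
        if role == "system":
            # System text rides inside the first [INST] block, wrapped in <<SYS>> tags.
            system_block = f"<<SYS>>\n{content}\n<</SYS>>\n\n"
        elif role == "user":
            prompt += f"[INST] {system_block}{content} [/INST]"
            system_block = ""  # the system block is only emitted once
        elif role == "assistant":
            prompt += f" {content} "
    return prompt


print(llama_2_chat_prompt([
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]))
# [INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# Hello! [/INST]
```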
```diff
@@ -307,9 +307,7 @@ def completion(
        result, logs = handle_prediction_response(
            prediction_url, api_key, print_verbose
        )
        model_response["ended"] = (
            time.time()
        )  # for pricing this must remain right after calling api

        ## LOGGING
        logging_obj.post_call(
            input=prompt,
```
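For context on the hunk above: Replicate predictions run asynchronously, so the client polls a prediction URL until the run settles, and the end timestamp is captured immediately after that call so duration-based pricing is not skewed by post-processing. A rough sketch of such a polling loop (the response shape and auth scheme here are assumptions about the Replicate HTTP API; litellm's actual `handle_prediction_response` may differ):

```python
import time

import requests


# Rough sketch of polling a Replicate prediction URL until it settles.
# The response fields and "Token" auth scheme are assumptions, not a
# definitive reproduction of litellm's handle_prediction_response.
def poll_prediction(prediction_url: str, api_key: str, interval: float = 0.5):
    headers = {"Authorization": f"Token {api_key}"}
    while True:
        resp = requests.get(prediction_url, headers=headers, timeout=30).json()
        status = resp.get("status")
        if status == "succeeded":
            return resp.get("output"), resp.get("logs", "")
        if status in ("failed", "canceled"):
            raise RuntimeError(f"prediction {status}: {resp.get('error')}")
        time.sleep(interval)  # wait before polling again
```

The caller would record `model_response["ended"] = time.time()` immediately after this returns, which is exactly what the comment in the hunk insists on.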