"""Cookbook example: grade a litellm completion with the autoevals Factuality evaluator.

Flow:
    1. Load API credentials (e.g. OPENAI_API_KEY) from a local .env file.
    2. Ask a question via ``litellm.completion`` (OpenAI-style interface).
    3. Score the model's answer against an expected answer with autoevals'
       LLM-based ``Factuality`` evaluator.
"""
import sys, os
import traceback

from dotenv import load_dotenv

import litellm
from litellm import embedding, completion, completion_cost

# Explicit import instead of the original ``from autoevals.llm import *`` —
# Factuality is the only name this script uses from that module.
from autoevals.llm import Factuality

# Pull credentials from a .env file in the working directory, if present.
load_dotenv()


def main() -> None:
    """Run one completion and evaluate its factuality against an expected answer."""
    question = "which country has the highest population"

    # litellm completion call — same call shape works for any supported provider.
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "user",
                "content": question,
            }
        ],
    )
    print(response)

    # use the auto eval Factuality() evaluator
    print("calling evaluator")
    evaluator = Factuality()
    result = evaluator(
        output=response.choices[0]["message"]["content"],  # response from litellm.completion()
        expected="India",  # expected output
        input=question,  # question passed to litellm.completion
    )

    print(result)


# Guard the entry point so importing this module does not trigger a paid API
# call as a side effect; running the file as a script behaves as before.
if __name__ == "__main__":
    main()