logging improvements

This commit is contained in:
Krrish Dholakia 2023-08-10 17:40:17 -07:00
parent 7071955f6d
commit 695d777ee5
2 changed files with 5 additions and 7 deletions

View file

@@ -350,10 +350,9 @@ def completion(
logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn) logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
input_payload = {"inputs": prompt} input_payload = {"inputs": prompt}
response = requests.post(API_URL, headers=headers, json=input_payload) response = requests.post(API_URL, headers=headers, json=input_payload)
completion_response = response.json()[0]['generated_text']
## LOGGING ## LOGGING
logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn) logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": response.text}, logger_fn=logger_fn)
completion_response = response.json()[0]['generated_text']
prompt_tokens = len(encoding.encode(prompt)) prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response)) completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT ## RESPONSE OBJECT
@@ -383,14 +382,13 @@ def completion(
}, },
headers=headers headers=headers
) )
## LOGGING
logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": res.text}, logger_fn=logger_fn)
if stream == True: if stream == True:
response = CustomStreamWrapper(res, "together_ai") response = CustomStreamWrapper(res, "together_ai")
return response return response
completion_response = res.json()['output']['choices'][0]['text'] completion_response = res.json()['output']['choices'][0]['text']
## LOGGING
logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
prompt_tokens = len(encoding.encode(prompt)) prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response)) completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT ## RESPONSE OBJECT

View file

@@ -1,6 +1,6 @@
[tool.poetry] [tool.poetry]
name = "litellm" name = "litellm"
version = "0.1.374" version = "0.1.375"
description = "Library to easily interface with LLM API providers" description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"] authors = ["BerriAI"]
license = "MIT License" license = "MIT License"