replacing individual provider flags with 'custom_llm_provider'

Krrish Dholakia 2023-08-12 16:40:36 -07:00
parent bc767cc42a
commit 72c1b5dcfc
3 changed files with 16 additions and 20 deletions


@@ -285,7 +285,7 @@ def completion(
completion_response = response[0].text
## LOGGING
logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": completion_response}, logger_fn=logger_fn)
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response))
## RESPONSE OBJECT
@@ -306,11 +306,11 @@ def completion(
prompt = " ".join([message["content"] for message in messages])
## LOGGING
- logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
+ logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
input_payload = {"inputs": prompt}
response = requests.post(API_URL, headers=headers, json=input_payload)
## LOGGING
logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": response.text}, logger_fn=logger_fn)
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": response.text}, logger_fn=logger_fn)
completion_response = response.json()[0]['generated_text']
prompt_tokens = len(encoding.encode(prompt))
completion_tokens = len(encoding.encode(completion_response))
@@ -332,7 +332,7 @@ def completion(
prompt = " ".join([message["content"] for message in messages]) # TODO: Add chat support for together AI
## LOGGING
- logging(model=model, input=prompt, azure=azure, logger_fn=logger_fn)
+ logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, logger_fn=logger_fn)
res = requests.post(endpoint, json={
"model": model,
"prompt": prompt,
@@ -342,7 +342,7 @@ def completion(
headers=headers
)
## LOGGING
logging(model=model, input=prompt, azure=azure, additional_args={"max_tokens": max_tokens, "original_response": res.text}, logger_fn=logger_fn)
logging(model=model, input=prompt, custom_llm_provider=custom_llm_provider, additional_args={"max_tokens": max_tokens, "original_response": res.text}, logger_fn=logger_fn)
if stream == True:
response = CustomStreamWrapper(res, "together_ai")
return response
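The hunks above only change the keyword passed at each call site from azure=azure to custom_llm_provider=custom_llm_provider; the logging helper itself is not part of this commit. A minimal sketch of what a provider-string-based helper could look like, assuming nothing beyond the keyword names visible at the call sites:

# Minimal sketch of a logging helper that takes a single provider string
# instead of per-provider boolean flags. The real helper's definition is not
# shown in this diff; every name below other than the keywords used at the
# call sites (model, input, custom_llm_provider, additional_args, logger_fn)
# is an assumption for illustration.
def logging(model=None, input=None, custom_llm_provider=None, additional_args=None, logger_fn=None):
    details = {
        "model": model,
        "input": input,
        "custom_llm_provider": custom_llm_provider,  # e.g. "huggingface" or "together_ai"
        **(additional_args or {}),
    }
    print(f"LiteLLM: {details}")
    if logger_fn is not None:
        logger_fn(details)  # user-supplied callback passed through from completion()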