handled merge conflicts

Krrish Dholakia 2023-07-31 18:33:29 -07:00
parent 990e4b499e
commit ce26285f1c
6 changed files with 6 additions and 28 deletions


@@ -59,7 +59,6 @@ def get_optional_params(
 ####### COMPLETION ENDPOINTS ################
 #############################################
 @client
-<<<<<<< HEAD
 @func_set_timeout(180, allowOverride=True) ## https://pypi.org/project/func-timeout/ - timeouts, in case calls hang (e.g. Azure)
 def completion(
     model, messages, # required params
@@ -77,12 +76,6 @@ def completion(
             temperature=temperature, top_p=top_p, n=n, stream=stream, stop=stop, max_tokens=max_tokens,
             presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, logit_bias=logit_bias, user=user
         )
-=======
-@func_set_timeout(60, allowOverride=True) ## https://pypi.org/project/func-timeout/ - timeouts, in case calls hang (e.g. Azure)
-def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False, logger_fn=None): # ,*,.. if optional params like forceTimeout, azure and logger_fn are passed then they're keyword arguments
-    try:
-        response = None
->>>>>>> bd42ec8 (clean up code files)
         if azure == True:
             # azure configs
             openai.api_type = "azure"
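The resolution keeps the HEAD side of the decorator conflict: the 180-second func_set_timeout cap and the keyword-argument signature survive, while the competing 60-second variant from bd42ec8 is dropped. A minimal sketch of how that decorator behaves, assuming the func-timeout package is installed (pip install func-timeout); slow_call is a hypothetical stand-in for completion:

import time

from func_timeout import FunctionTimedOut, func_set_timeout

@func_set_timeout(180, allowOverride=True)  # hard 180s cap; allowOverride exposes a forceTimeout= kwarg
def slow_call(seconds):
    time.sleep(seconds)
    return "done"

print(slow_call(1))                  # well inside the cap, returns "done"
try:
    slow_call(5, forceTimeout=2)     # per-call override: aborts after ~2s
except FunctionTimedOut:
    print("call hung past its timeout")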
@@ -97,7 +90,7 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
                 messages = messages,
                 **optional_params
             )
-        elif model in open_ai_chat_completion_models:
+        elif model in litellm.open_ai_chat_completion_models:
             openai.api_type = "openai"
             openai.api_base = "https://api.openai.com/v1"
             openai.api_version = None
@@ -111,7 +104,7 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
                 messages = messages,
                 **optional_params
             )
-        elif model in open_ai_text_completion_models:
+        elif model in litellm.open_ai_text_completion_models:
             openai.api_type = "openai"
             openai.api_base = "https://api.openai.com/v1"
             openai.api_version = None
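The two one-line changes above are the same fix: after the merge the provider model lists live on the litellm module, so the bare names would raise NameError at dispatch time. The routing itself is plain membership testing; a sketch under that assumption, with abbreviated illustrative lists rather than litellm's real tables:

# Illustrative model tables; litellm's actual lists are longer.
open_ai_chat_completion_models = ["gpt-3.5-turbo", "gpt-4"]
open_ai_text_completion_models = ["text-davinci-003"]

def route(model: str) -> str:
    # Dispatch on list membership, mirroring the elif chain in the diff.
    if model in open_ai_chat_completion_models:
        return "openai chat endpoint"
    elif model in open_ai_text_completion_models:
        return "openai text endpoint"
    raise ValueError(f"No valid completion model args passed in - {model}")

print(route("gpt-3.5-turbo"))  # -> openai chat endpoint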
@@ -221,10 +214,6 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
                 ],
             }
             response = new_response
-<<<<<<< HEAD
-        else:
-            raise Exception(f"Model '{model}' not found. Please check your model name and try again.")
-=======
         elif model in litellm.open_ai_chat_completion_models:
             openai.api_type = "openai"
@@ -255,7 +244,6 @@ def completion(model, messages, max_tokens=None, *, forceTimeout=60, azure=False
             logging(model=model, input=messages, azure=azure, logger_fn=logger_fn)
             args = locals()
             raise ValueError(f"No valid completion model args passed in - {args}")
->>>>>>> bd42ec8 (clean up code files)
         return response
     except Exception as e:
         logging(model=model, input=messages, azure=azure, additional_args={"max_tokens": max_tokens}, logger_fn=logger_fn)
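With the markers removed, an unrecognized model no longer hits HEAD's bare Exception; it falls through to the logging call and a ValueError built from locals(), so the error message carries every argument the caller passed. A compact sketch of that fallthrough pattern (complete and the stub responses are hypothetical):

def complete(model, messages, azure=False, logger_fn=None):
    response = None
    if azure == True:
        response = {"provider": "azure"}   # stand-in for the real Azure call
    elif model == "gpt-3.5-turbo":
        response = {"provider": "openai"}  # stand-in for the real OpenAI call
    else:
        args = locals()  # snapshot of every argument, embedded in the error
        raise ValueError(f"No valid completion model args passed in - {args}")
    return response

print(complete("gpt-3.5-turbo", []))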


@@ -1,10 +1,10 @@
 import sys, os
 import traceback
-sys.path.append('..') # Adds the parent directory to the system path
-import main
-from main import completion
-main.set_verbose = True
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
+litellm.set_verbose = True
 user_message = "Hello, whats the weather in San Francisco??"
 messages = [{ "content": user_message,"role": "user"}]
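The test header now targets the packaged litellm module instead of a loose main.py, and sys.path.insert(0, ...) guarantees the local checkout wins over any installed copy (append would have put it last on the search path). Roughly what the resolved script sets up, assuming it runs from the tests directory with an OPENAI_API_KEY in the environment; the model name is illustrative:

import sys, os
sys.path.insert(0, os.path.abspath('../..'))  # local repo checkout takes precedence over site-packages

import litellm
from litellm import embedding, completion

litellm.set_verbose = True  # log request/response details while debugging

messages = [{"content": "Hello, whats the weather in San Francisco??", "role": "user"}]
response = completion(model="gpt-3.5-turbo", messages=messages)  # assumes OPENAI_API_KEY is set
print(response)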


@@ -17,20 +17,10 @@ model_fallback_list = ["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc54193
 user_message = "Hello, how are you?"
 messages = [{ "content": user_message,"role": "user"}]
-# for _ in range(10):
 for model in model_fallback_list:
     try:
         response = embedding(model="text-embedding-ada-002", input=[user_message])
         response = completion(model=model, messages=messages)
         print(response)
-<<<<<<< HEAD
-        if response != None:
-            break
     except Exception as e:
-=======
-        # if response != None:
-        #     break
-    except:
->>>>>>> bd42ec8 (clean up code files)
         print(f"error occurred: {traceback.format_exc()}")
-        raise e
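The resolved loop drops both the HEAD-side early break and the trailing raise e, so every model in the fallback list is attempted and a failure is only printed before moving on. A self-contained sketch of that shape, with a fake model list and a stubbed completion in place of the real providers:

import traceback

model_fallback_list = ["model-a", "model-b"]  # illustrative names, not the real replicate/openai list

def completion(model, messages):
    # Stub: the first model "fails" so the loop demonstrates falling back.
    if model == "model-a":
        raise RuntimeError("simulated provider outage")
    return {"model": model, "choices": ["Hello! Doing well."]}

messages = [{"content": "Hello, how are you?", "role": "user"}]
for model in model_fallback_list:
    try:
        response = completion(model=model, messages=messages)
        print(response)
    except Exception:
        # No re-raise: the next model in the list is the fallback
        print(f"error occurred: {traceback.format_exc()}")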