add config

ishaan-jaff 2023-08-11 10:18:12 -07:00
parent fb285c8c9f
commit 27cc3760b7
5 changed files with 48 additions and 3 deletions

@@ -48,6 +48,7 @@ api_base = None
headers = None
api_version = None
organization = None
config_path = None
####### Secret Manager #####################
secret_manager_client = None
####### COMPLETION MODELS ###################

litellm/config.json (new file, 6 lines)
@@ -0,0 +1,6 @@
{
    "model": "command-nightly",
    "temperature": 0.7,
    "max_tokens": 10
}
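Since the config is read back as a plain dict and forwarded as keyword arguments, the file above is (assuming its keys map 1:1 to `completion()` parameters, as `read_config_args` below suggests) roughly equivalent to the direct call in this sketch, which is not part of the commit; the messages value is only an illustrative placeholder:

```python
import litellm

# Rough equivalent of config.json above, passing its keys as completion() kwargs.
response = litellm.completion(
    model="command-nightly",
    temperature=0.7,
    max_tokens=10,
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
)
print(response)
```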

@@ -8,7 +8,7 @@ from litellm import client, logging, exception_type, timeout, get_optional_param
import tiktoken
from concurrent.futures import ThreadPoolExecutor
encoding = tiktoken.get_encoding("cl100k_base")
-from litellm.utils import get_secret, install_and_import, CustomStreamWrapper
+from litellm.utils import get_secret, install_and_import, CustomStreamWrapper, read_config_args
####### ENVIRONMENT VARIABLES ###################
dotenv.load_dotenv() # Loading env variables using dotenv
new_response = {
@@ -38,7 +38,7 @@ async def acompletion(*args, **kwargs):
# @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(2), reraise=True, retry_error_callback=lambda retry_state: setattr(retry_state.outcome, 'retry_variable', litellm.retry)) # retry call, turn this off by setting `litellm.retry = False`
@timeout(60) ## set timeouts, in case calls hang (e.g. Azure) - default is 60s, override with `force_timeout`
def completion(
-    model, messages, # required params
+    messages, model="gpt-3.5-turbo", # required params
    # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
    functions=[], function_call="", # optional params
    temperature=1, top_p=1, n=1, stream=False, stop=None, max_tokens=float('inf'),
@@ -504,3 +504,10 @@ def print_verbose(print_statement):
    if random.random() <= 0.3:
        print("Get help - https://discord.com/invite/wuPM9dRgDw")
def config_completion(**kwargs):
    if litellm.config_path != None:
        config_args = read_config_args(litellm.config_path)
        # overwrite any args passed in with config args
        return completion(**kwargs, **config_args)
    else:
        raise ValueError("No config path set, please set a config path using `litellm.config_path = 'path/to/config.json'`")
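For context, a minimal usage sketch of the new `config_completion` entry point, assuming the `litellm/config.json` added above is the file being pointed at (not part of this commit):

```python
import litellm

# Point litellm at the config file added in this commit
# (path assumed relative to the caller's working directory).
litellm.config_path = "litellm/config.json"

# model, temperature and max_tokens come from config.json. Only pass kwargs that
# are NOT in the config: completion(**kwargs, **config_args) as written raises a
# TypeError on duplicate keyword arguments rather than overwriting them.
response = litellm.config_completion(
    messages=[{"role": "user", "content": "Hello, who are you?"}]
)
print(response)
```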

@@ -217,4 +217,19 @@ def test_completion_together_ai_stream():
        for chunk in response:
            print(chunk['choices'][0]['delta']) # same as openai format
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
# test config file with completion #
def test_completion_openai_config():
    try:
        litellm.config_path = "../config.json"
        litellm.set_verbose = True
        response = litellm.config_completion(messages=messages)
        # Add any assertions here to check the response
        print(response)
        litellm.config_path = None
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")

@@ -709,3 +709,19 @@ class CustomStreamWrapper:
            completion_obj["content"] = chunk.text
        # return this for all models
        return {"choices": [{"delta": completion_obj}]}
########## Reading Config File ############################
def read_config_args(config_path):
    try:
        import os
        current_path = os.getcwd()
        with open(config_path, "r") as config_file:
            config = json.load(config_file)
        # read keys/ values from config file and return them
        return config
    except Exception as e:
        print("An error occurred while reading config:", str(e))
        raise e
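A quick sanity-check sketch (not part of the commit), assuming `json` is imported at module level in utils.py and that the `litellm/config.json` added above is on disk relative to the working directory:

```python
from litellm.utils import read_config_args

config_args = read_config_args("litellm/config.json")
# With the config.json from this commit, this should print:
# {'model': 'command-nightly', 'temperature': 0.7, 'max_tokens': 10}
print(config_args)
```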