diff --git a/litellm/__init__.py b/litellm/__init__.py
index 5f526e0eb..ca6830293 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -48,6 +48,7 @@ api_base = None
 headers = None
 api_version = None
 organization = None
+config_path = None
 ####### Secret Manager #####################
 secret_manager_client = None
 ####### COMPLETION MODELS ###################
diff --git a/litellm/config.json b/litellm/config.json
new file mode 100644
index 000000000..a6707a1ee
--- /dev/null
+++ b/litellm/config.json
@@ -0,0 +1,5 @@
+{
+  "model": "command-nightly",
+  "temperature": 0.7,
+  "max_tokens": 10
+}
diff --git a/litellm/main.py b/litellm/main.py
index e5291a9b7..3a93f9c24 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -8,7 +8,7 @@ from litellm import client, logging, exception_type, timeout, get_optional_param
 import tiktoken
 from concurrent.futures import ThreadPoolExecutor
 encoding = tiktoken.get_encoding("cl100k_base")
-from litellm.utils import get_secret, install_and_import, CustomStreamWrapper
+from litellm.utils import get_secret, install_and_import, CustomStreamWrapper, read_config_args
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv
 new_response = {
@@ -38,7 +38,7 @@ async def acompletion(*args, **kwargs):
 # @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(2), reraise=True, retry_error_callback=lambda retry_state: setattr(retry_state.outcome, 'retry_variable', litellm.retry)) # retry call, turn this off by setting `litellm.retry = False`
 @timeout(60) ## set timeouts, in case calls hang (e.g. Azure) - default is 60s, override with `force_timeout`
 def completion(
-    model, messages, # required params
+    messages, model="gpt-3.5-turbo", # required params
     # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
     functions=[], function_call="", # optional params
     temperature=1, top_p=1, n=1, stream=False, stop=None, max_tokens=float('inf'),
@@ -504,3 +504,10 @@ def print_verbose(print_statement):
     if random.random() <= 0.3:
       print("Get help - https://discord.com/invite/wuPM9dRgDw")
 
+def config_completion(**kwargs):
+    if litellm.config_path is not None:
+        config_args = read_config_args(litellm.config_path)
+        # config args overwrite any args passed in directly
+        return completion(**{**kwargs, **config_args})
+    else:
+        raise ValueError("No config path set, please set a config path using `litellm.config_path = 'path/to/config.json'`")
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 9583ea03f..54c1a2ab1 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -217,4 +217,16 @@ def test_completion_together_ai_stream():
         for chunk in response:
             print(chunk['choices'][0]['delta']) # same as openai format
     except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
\ No newline at end of file
+        pytest.fail(f"Error occurred: {e}")
+
+# test config file with completion #
+def test_completion_openai_config():
+    try:
+        litellm.config_path = "../config.json"
+        litellm.set_verbose = True
+        response = litellm.config_completion(messages=messages)
+        # Add any assertions here to check the response
+        print(response)
+        litellm.config_path = None
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
diff --git a/litellm/utils.py b/litellm/utils.py
index e8f5f3976..4fa2751a6 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -709,3 +709,15 @@ class CustomStreamWrapper:
             completion_obj["content"] = chunk.text
         # return this for all models
         return {"choices": [{"delta": completion_obj}]}
+
+
+########## Reading Config File ############################
+def read_config_args(config_path):
+    try:
+        with open(config_path, "r") as config_file:
+            config = json.load(config_file)
+        # return the key/value pairs read from the config file
+        return config
+    except Exception as e:
+        print("An error occurred while reading config:", str(e))
+        raise
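Taken together, these changes add a config-driven completion flow: `read_config_args` loads a JSON file, and `config_completion` merges its keys into a regular `completion` call, with config values taking precedence. A minimal usage sketch based on the functions added above (the config path and messages here are illustrative, not taken from the diff):

```python
import litellm

# Point litellm at a JSON config file; its keys (model, temperature,
# max_tokens, ...) are merged into every config_completion() call.
litellm.config_path = "litellm/config.json"  # illustrative path

messages = [{"role": "user", "content": "Hey, how's it going?"}]

# Any key present in the config file overrides the same arg passed here.
response = litellm.config_completion(messages=messages)
print(response)

# Unset the path when done; config_completion() raises ValueError
# if litellm.config_path is None.
litellm.config_path = None
```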