mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)

refactor: add black formatting
commit 4905929de3 (parent b87d630b0a)
156 changed files with 19723 additions and 10869 deletions
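For context, black normalizes layout without changing behavior, so the churn counted above is whitespace, quoting, and line wrapping only. A minimal sketch of what the formatter does, using black's real `format_str` API on an illustrative snippet (the snippet itself is not from this diff):

```python
import black

# Deliberately messy input, before formatting (illustrative only).
messy = 'response = litellm.completion(model ="gpt-3.5-turbo",messages=[{"role":"user","content":"Hi"}])'

# black.format_str applies the same rules as the CLI run behind this commit:
# double quotes, normalized spacing, 88-character lines by default.
print(black.format_str(messy, mode=black.Mode()))
```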
@@ -33,7 +33,7 @@

# llm_model_list: Optional[list] = None
# server_settings: Optional[dict] = None

# set_callbacks() # sets litellm callbacks for logging if they exist in the environment

# if "CONFIG_FILE_PATH" in os.environ:
#     llm_router, llm_model_list, server_settings = load_router_config(router=llm_router, config_file_path=os.getenv("CONFIG_FILE_PATH"))
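`load_router_config` itself sits outside this hunk; below is a hypothetical reconstruction of the pattern its call site implies, reading a YAML config into the router, model list, and server settings. The `model_list` and `server_settings` keys are assumptions taken from the variable names, not confirmed by this diff:

```python
import yaml  # pip install pyyaml
import litellm

def load_router_config_sketch(router, config_file_path):
    """Hypothetical stand-in for the elided load_router_config."""
    with open(config_file_path) as f:
        config = yaml.safe_load(f) or {}
    model_list = config.get("model_list")            # assumed key name
    server_settings = config.get("server_settings")  # assumed key name
    if model_list:
        router = litellm.Router(model_list=model_list)
    return router, model_list, server_settings
```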
@@ -44,7 +44,7 @@

# @router.get("/models") # if project requires model list
# def model_list():
#     all_models = litellm.utils.get_valid_models()
#     if llm_model_list:
#         all_models += llm_model_list
#     return dict(
#         data=[
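The hunk cuts off inside the return; a sketch of how the endpoint plausibly finishes, mirroring OpenAI's GET /models response shape (the `object` and `owned_by` fields are assumptions):

```python
def model_list_response_sketch(all_models: list) -> dict:
    """Wrap model names in an OpenAI-style model list response."""
    return dict(
        data=[
            {"id": model, "object": "model", "created": 0, "owned_by": "litellm"}
            for model in all_models
        ],
        object="list",
    )
```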
@@ -79,8 +79,8 @@

# @router.post("/v1/embeddings")
# @router.post("/embeddings")
# async def embedding(request: Request):
#     try:
#         data = await request.json()
#         # default to always using the "ENV" variables, only if AUTH_STRATEGY==DYNAMIC then reads headers
#         if os.getenv("AUTH_STRATEGY", None) == "DYNAMIC" and "authorization" in request.headers: # if users pass LLM api keys as part of header
#             api_key = request.headers.get("authorization")
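From the client side, `AUTH_STRATEGY=DYNAMIC` means the key travels in the request rather than in the server's environment. An illustrative call (host, port, and payload are assumptions):

```python
import httpx

# Only honored when the proxy runs with AUTH_STRATEGY=DYNAMIC.
resp = httpx.post(
    "http://localhost:8000/v1/embeddings",                       # assumed address
    json={"model": "text-embedding-ada-002", "input": "hello"},
    headers={"authorization": "Bearer sk-..."},                  # per-request key
)
print(resp.json())
```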
@@ -106,13 +106,13 @@

#     data = await request.json()
#     server_model = server_settings.get("completion_model", None) if server_settings else None
#     data["model"] = server_model or model or data["model"]
#     ## CHECK KEYS ##
#     # default to always using the "ENV" variables, only if AUTH_STRATEGY==DYNAMIC then reads headers
#     # env_validation = litellm.validate_environment(model=data["model"])
#     # if (env_validation['keys_in_environment'] is False or os.getenv("AUTH_STRATEGY", None) == "DYNAMIC") and ("authorization" in request.headers or "api-key" in request.headers): # if users pass LLM api keys as part of header
#     #     if "authorization" in request.headers:
#     #         api_key = request.headers.get("authorization")
#     #     elif "api-key" in request.headers:
#     #         api_key = request.headers.get("api-key")
#     #     print(f"api_key in headers: {api_key}")
#     #     if " " in api_key:
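The hunk ends on `if " " in api_key:`, the check for a `Bearer <key>` prefix. A standalone sketch of that parsing step (the helper name is mine):

```python
def strip_bearer_prefix(api_key: str) -> str:
    """Reduce 'Bearer sk-...' to 'sk-...'; leave bare keys untouched."""
    if " " in api_key:
        # Header arrived as 'Bearer <key>'; keep only the key part.
        return api_key.split(" ")[1]
    return api_key
```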
@@ -122,11 +122,11 @@

#     #         api_key = api_key
#     #     data["api_key"] = api_key
#     #     print(f"api_key in data: {api_key}")
#     ## CHECK CONFIG ##
#     if llm_model_list and data["model"] in [m["model_name"] for m in llm_model_list]:
#         for m in llm_model_list:
#             if data["model"] == m["model_name"]:
#                 for key, value in m["litellm_params"].items():
#                     data[key] = value
#                 break
#     response = litellm.completion(
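The CHECK CONFIG block overlays the configured `litellm_params` onto the request body whenever the requested model matches a configured `model_name`. The same logic, pulled out as a self-contained helper:

```python
def apply_model_config(data: dict, llm_model_list) -> dict:
    """Copy litellm_params (model, api_key, api_base, ...) into the request body."""
    if llm_model_list and data.get("model") in [m["model_name"] for m in llm_model_list]:
        for m in llm_model_list:
            if data["model"] == m["model_name"]:
                for key, value in m["litellm_params"].items():
                    data[key] = value
                break
    return data
```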
@@ -145,21 +145,21 @@

# @router.post("/router/completions")
# async def router_completion(request: Request):
#     global llm_router
#     try:
#         data = await request.json()
#         if "model_list" in data:
#             llm_router = litellm.Router(model_list=data.pop("model_list"))
#         if llm_router is None:
#             raise Exception("Save model list via config.yaml. Eg.: ` docker build -t myapp --build-arg CONFIG_FILE=myconfig.yaml .` or pass it in as model_list=[..] as part of the request body")

#         # openai.ChatCompletion.create replacement
#         response = await llm_router.acompletion(model="gpt-3.5-turbo",
#             messages=[{"role": "user", "content": "Hey, how's it going?"}])

#         if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
#             return StreamingResponse(data_generator(response), media_type='text/event-stream')
#         return response
#     except Exception as e:
#         error_traceback = traceback.format_exc()
#         error_msg = f"{str(e)}\n\n{error_traceback}"
#         return {"error": error_msg}
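Per the exception text above, the model list can ride along in the request body instead of config.yaml. An illustrative client call (host and key are placeholders; note the handler shown hardcodes its own messages):

```python
import httpx

body = {
    "model_list": [
        {
            "model_name": "gpt-3.5-turbo",  # alias clients ask for
            "litellm_params": {"model": "gpt-3.5-turbo", "api_key": "sk-..."},
        }
    ],
}
resp = httpx.post("http://localhost:8000/router/completions", json=body)  # assumed address
print(resp.json())
```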
@@ -167,11 +167,11 @@

# @router.post("/router/embedding")
# async def router_embedding(request: Request):
#     global llm_router
#     try:
#         data = await request.json()
#         if "model_list" in data:
#             llm_router = litellm.Router(model_list=data.pop("model_list"))
#         if llm_router is None:
#             raise Exception("Save model list via config.yaml. Eg.: ` docker build -t myapp --build-arg CONFIG_FILE=myconfig.yaml .` or pass it in as model_list=[..] as part of the request body")

#         response = await llm_router.aembedding(model="gpt-3.5-turbo", # type: ignore
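Standalone, the router embedding call looks like the sketch below. The hunk cuts off after `model=`, so the `input` argument and the model_list contents are assumptions based on litellm.Router's public interface:

```python
import asyncio
import litellm

async def main():
    router = litellm.Router(
        model_list=[
            {
                "model_name": "gpt-3.5-turbo",  # alias, as in the handler above
                "litellm_params": {"model": "text-embedding-ada-002", "api_key": "sk-..."},
            }
        ]
    )
    # `input` is assumed; the hunk above ends mid-call.
    response = await router.aembedding(model="gpt-3.5-turbo", input=["hello world"])
    print(response)

asyncio.run(main())
```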
@@ -180,7 +180,7 @@

#         if 'stream' in data and data['stream'] == True: # use generate_responses to stream responses
#             return StreamingResponse(data_generator(response), media_type='text/event-stream')
#         return response
#     except Exception as e:
#         error_traceback = traceback.format_exc()
#         error_msg = f"{str(e)}\n\n{error_traceback}"
#         return {"error": error_msg}
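Both handlers stream through `data_generator`, which never appears in these hunks. One plausible shape, framing chunks as server-sent events for the `text/event-stream` media type declared above; the chunk-to-dict step is an assumption about litellm's streaming chunk type:

```python
import json

async def data_generator(response):
    """Hypothetical reconstruction: frame each streamed chunk as an SSE event."""
    async for chunk in response:
        payload = chunk if isinstance(chunk, dict) else dict(chunk)  # shape assumed
        yield f"data: {json.dumps(payload)}\n\n"
```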
@@ -190,4 +190,4 @@

#     return "LiteLLM: RUNNING"


# app.include_router(router)
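The closing lines imply the standard FastAPI scaffolding around the health route and router registration; reconstructed minimally (the route path on the health check is an assumption):

```python
from fastapi import APIRouter, FastAPI

app = FastAPI()
router = APIRouter()

@router.get("/")  # route path assumed; only the return value appears in the diff
async def home():
    return "LiteLLM: RUNNING"

app.include_router(router)
```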