forked from phoenix/litellm-mirror
Refactor proxy_server.py for readability and code consistency
This commit is contained in:
parent
266b3b82f5
commit
4414594e7d
1 changed file with 181 additions and 153 deletions
@@ -1,11 +1,11 @@
import sys, os, platform, time, copy
import threading
import shutil, random, traceback

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path - for litellm local dev


try:
    import uvicorn
    import fastapi
@@ -29,6 +29,7 @@ except ImportError as e:
from llm import litellm_completion

import random

list_of_messages = [
    "'The thing I wish you improved is...'",
    "'A feature I really want is...'",
@@ -40,6 +41,7 @@ list_of_messages = [
    "'I get frustrated when the product...'",
]


def generate_feedback_box():
    box_width = 60
@@ -47,19 +49,19 @@ def generate_feedback_box():
    message = random.choice(list_of_messages)

    print()
    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + '-' * box_width + '#\033[0m')
    print('\033[1;37m' + '#' + ' ' * box_width + '#\033[0m')
    print('\033[1;37m' + '# {:^59} #\033[0m'.format(message))
    print('\033[1;37m' + '# {:^59} #\033[0m'.format('https://github.com/BerriAI/litellm/issues/new'))
    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + ' ' * box_width + '#\033[0m')
    print('\033[1;37m' + '#' + '-' * box_width + '#\033[0m')
    print()
    print(' Thank you for using LiteLLM! - Krrish & Ishaan')
    print()
    print()

generate_feedback_box()

generate_feedback_box()

print()
print("\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m")
@@ -100,19 +102,24 @@ config_dir = os.getcwd()
config_dir = appdirs.user_config_dir("litellm")
user_config_path = os.path.join(config_dir, config_filename)
log_file = 'api_log.json'


#### HELPER FUNCTIONS ####
def print_verbose(print_statement):
    global user_debug
    if user_debug:
        print(print_statement)

def usage_telemetry(feature: str): # helps us know if people are using this feature. Set `litellm --telemetry False` to your cli call to turn this off

def usage_telemetry(
        feature: str):  # helps us know if people are using this feature. Set `litellm --telemetry False` to your cli call to turn this off
    if user_telemetry:
        data = {
            "feature": feature  # "local_proxy_server"
        }
        threading.Thread(target=litellm.utils.litellm_telemetry, args=(data,), daemon=True).start()


def add_keys_to_config(key, value):
    # Check if file exists
    if os.path.exists(user_config_path):
@@ -130,6 +137,7 @@ def add_keys_to_config(key, value):
    with open(user_config_path, 'wb') as f:
        tomli_w.dump(config, f)


def save_params_to_config(data: dict):
    # Check if file exists
    if os.path.exists(user_config_path):
@@ -164,7 +172,6 @@ def save_params_to_config(data: dict):


def load_config():
    try:
        global user_config, user_api_base, user_max_tokens, user_temperature, user_model
        # As the .env file is typically much simpler in structure, we use load_dotenv here directly
        with open(user_config_path, "rb") as f:
@@ -176,9 +183,12 @@ def load_config():
                os.environ[key] = user_config["keys"][key]  # litellm can read keys from the environment
        ## settings
        if "general" in user_config:
            litellm.add_function_to_prompt = user_config["general"].get("add_function_to_prompt", True)  # by default add function to prompt if unsupported by provider
            litellm.drop_params = user_config["general"].get("drop_params", True)  # by default drop params if unsupported by provider
            litellm.model_fallbacks = user_config["general"].get("fallbacks", None)  # fallback models in case initial completion call fails
            litellm.add_function_to_prompt = user_config["general"].get("add_function_to_prompt",
                                                                        True)  # by default add function to prompt if unsupported by provider
            litellm.drop_params = user_config["general"].get("drop_params",
                                                             True)  # by default drop params if unsupported by provider
            litellm.model_fallbacks = user_config["general"].get("fallbacks",
                                                                 None)  # fallback models in case initial completion call fails
            default_model = user_config["general"].get("default_model", None)  # route all requests to this model.

            if user_model is None:  # `litellm --model <model-name>`` > default_model.
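For orientation, load_config above reads a TOML file from user_config_path and looks for optional "keys" and "general" tables; the sketch below shows the shape of a config file that would satisfy the lookups visible in this hunk, written with tomli_w (the same writer add_keys_to_config uses). The section and option names come from the diff; every concrete value and the output path are illustrative assumptions, not LiteLLM defaults.

# Illustrative sketch only -- not part of this commit. Shows the config shape
# load_config() reads: a "keys" table exported into os.environ and a "general"
# table with the options accessed above. All values here are assumptions.
import tomli_w

example_config = {
    "keys": {
        "OPENAI_API_KEY": "sk-...",  # any provider key; load_config copies it into the environment
    },
    "general": {
        "add_function_to_prompt": True,   # add function to prompt if unsupported by provider
        "drop_params": True,              # drop params if unsupported by provider
        "fallbacks": ["gpt-3.5-turbo"],   # fallback models if the initial completion call fails
        "default_model": "gpt-4",         # route requests here when `litellm --model` is not given
    },
}

with open("litellm.config.example.toml", "wb") as f:  # hypothetical path for the example
    tomli_w.dump(example_config, f)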
@@ -223,10 +233,11 @@ def load_config():
                },
                final_prompt_value=model_prompt_template.get("MODEL_POST_PROMPT", ""),
            )
    except Exception as e:
        pass

def initialize(model, alias, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params, add_function_to_prompt, headers, save):


def initialize(model, alias, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params,
               add_function_to_prompt, headers, save):
    global user_model, user_api_base, user_debug, user_max_tokens, user_temperature, user_telemetry, user_headers
    user_model = model
    user_debug = debug
@@ -263,6 +274,7 @@ def initialize(model, alias, api_base, debug, temperature, max_tokens, max_budge
    user_telemetry = telemetry
    usage_telemetry(feature="local_proxy_server")


def deploy_proxy(model, api_base, debug, temperature, max_tokens, telemetry, deploy):
    import requests
    # Load .env file
@@ -293,8 +305,6 @@ def deploy_proxy(model, api_base, debug, temperature, max_tokens, telemetry, dep
    files = {"file": open(".env", "rb")}
    # print(files)


    response = requests.post(url, data=data, files=files)
    # print(response)
    # Check the status of the request
@@ -309,6 +319,7 @@ def deploy_proxy(model, api_base, debug, temperature, max_tokens, telemetry, dep
    return url


def track_cost_callback(
    kwargs,  # kwargs to completion
    completion_response,  # response from completion
@@ -330,12 +341,12 @@ def track_cost_callback(
        # for streaming responses
        if "complete_streaming_response" in kwargs:
            # for tracking streaming cost we pass the "messages" and the output_text to litellm.completion_cost
            completion_response=kwargs["complete_streaming_response"]
            completion_response = kwargs["complete_streaming_response"]
            input_text = kwargs["messages"]
            output_text = completion_response["choices"][0]["message"]["content"]
            response_cost = litellm.completion_cost(
                model = kwargs["model"],
                messages = input_text,
                model=kwargs["model"],
                messages=input_text,
                completion=output_text
            )
            model = kwargs['model']
@@ -374,6 +385,7 @@ def track_cost_callback(
    except:
        pass


def logger(
    kwargs,  # kwargs to completion
    completion_response=None,  # response from completion
@@ -399,6 +411,7 @@ def logger(
            existing_data = {}

            existing_data.update(log_data)

            def write_to_log():
                with open(log_file, 'w') as f:
                    json.dump(existing_data, f, indent=2)
@@ -406,7 +419,8 @@ def logger(
            thread = threading.Thread(target=write_to_log, daemon=True)
            thread.start()
        elif log_event_type == 'post_api_call':
            if "stream" not in kwargs["optional_params"] or kwargs["optional_params"]["stream"] is False or kwargs.get("complete_streaming_response", False):
            if "stream" not in kwargs["optional_params"] or kwargs["optional_params"]["stream"] is False or kwargs.get(
                    "complete_streaming_response", False):
                inference_params = copy.deepcopy(kwargs)
                timestamp = inference_params.pop('start_time')
                dt_key = timestamp.strftime("%Y%m%d%H%M%S%f")[:23]
@@ -425,10 +439,12 @@ def logger(
    except:
        pass


litellm.input_callback = [logger]
litellm.success_callback = [logger]
litellm.failure_callback = [logger]


#### API ENDPOINTS ####
@router.get("/models")  # if project requires model list
def model_list():
@@ -440,19 +456,26 @@ def model_list():
    else:
        all_models = litellm.utils.get_valid_models()
    return dict(
        data = [{"id": model, "object": "model", "created": 1677610602, "owned_by": "openai"} for model in all_models],
        data=[{"id": model, "object": "model", "created": 1677610602, "owned_by": "openai"} for model in
              all_models],
        object="list",
    )


@router.post("/completions")
async def completion(request: Request):
    data = await request.json()
    return litellm_completion(data=data, type="completion", user_model=user_model, user_temperature=user_temperature, user_max_tokens=user_max_tokens, user_api_base=user_api_base, user_headers=user_headers, user_debug=user_debug)
    return litellm_completion(data=data, type="completion", user_model=user_model, user_temperature=user_temperature,
                              user_max_tokens=user_max_tokens, user_api_base=user_api_base, user_headers=user_headers,
                              user_debug=user_debug)


@router.post("/chat/completions")
async def chat_completion(request: Request):
    data = await request.json()
    response = litellm_completion(data, type="chat_completion", user_model=user_model, user_temperature=user_temperature, user_max_tokens=user_max_tokens, user_api_base=user_api_base, user_headers=user_headers, user_debug=user_debug)
    response = litellm_completion(data, type="chat_completion", user_model=user_model,
                                  user_temperature=user_temperature, user_max_tokens=user_max_tokens,
                                  user_api_base=user_api_base, user_headers=user_headers, user_debug=user_debug)
    return response
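As a usage illustration (not part of the diff): the /chat/completions route above accepts an OpenAI-style JSON body, so a client can POST to the running proxy directly. The host and port in the sketch below are assumptions for the example; they are not specified in this hunk.

# Illustrative client call -- host/port and model name are assumptions, not taken from this commit.
import requests

resp = requests.post(
    "http://0.0.0.0:8000/chat/completions",  # assumed address of the local proxy
    json={
        "model": "gpt-3.5-turbo",  # any model the proxy is configured to route to
        "messages": [{"role": "user", "content": "Hello from the proxy!"}],
    },
    timeout=60,
)
print(resp.json())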
@@ -462,6 +485,7 @@ async def v1_completion(request: Request):
    data = await request.json()
    return litellm_completion(data=data, type="completion")


@router.post("/v1/chat/completions")
async def v1_chat_completion(request: Request):
    data = await request.json()
@@ -469,6 +493,7 @@ async def v1_chat_completion(request: Request):
    response = litellm_completion(data, type="chat_completion")
    return response


def print_cost_logs():
    with open('costs.json', 'r') as f:
        # print this in green
@@ -477,13 +502,16 @@ def print_cost_logs():
        print("\033[0m")
    return


@router.get("/ollama_logs")
async def retrieve_server_log(request: Request):
    filepath = os.path.expanduser('~/.ollama/logs/server.log')
    return FileResponse(filepath)


@router.get("/")
async def home(request: Request):
    return "LiteLLM: RUNNING"


app.include_router(router)
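The startup code that actually serves `app` sits outside the hunks in this commit; below is a minimal sketch of how a FastAPI app with this router could be served under uvicorn (which the file imports at the top). The host and port are illustrative assumptions.

# Minimal serving sketch -- assumes `app` is the FastAPI instance the router is
# attached to above; host and port are assumptions, not taken from this commit.
import uvicorn

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)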