fix(proxy_server): import errors

Krrish Dholakia 2023-10-11 11:05:03 -07:00
parent 3415471390
commit d0ec844629
4 changed files with 41 additions and 50 deletions
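The import errors appear to come from appdirs: it was imported unconditionally at the top of the proxy server module (first removed line in the second file below) while also being dropped from the package's declared dependencies. This commit instead imports appdirs inside the server's try/except block, which pip-installs the proxy extras on first use. A minimal sketch of that import-or-install pattern (install_and_import is an illustrative helper, not part of the diff):

# Generic sketch of the deferred import-or-install pattern the proxy
# server uses below for uvicorn, fastapi, tomli, and appdirs.
import importlib
import subprocess
import sys

def install_and_import(package):
    try:
        return importlib.import_module(package)
    except ImportError:
        subprocess.check_call([sys.executable, "-m", "pip", "install", package])
        return importlib.import_module(package)

appdirs = install_and_import("appdirs")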


@@ -5,38 +5,7 @@ from dotenv import load_dotenv
load_dotenv()
from importlib import resources
import shutil, random
list_of_messages = [
    "'The thing I wish you improved is...'",
    "'A feature I really want is...'",
    "'The worst thing about this product is...'",
    "'This product would be better if...'",
    "'I don't like how this works...'",
    "'It would help me if you could add...'",
    "'This feature doesn't meet my needs because...'",
    "'I get frustrated when the product...'",
]

def generate_feedback_box():
    box_width = 60

    # Select a random message
    message = random.choice(list_of_messages)

    print()
    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
    print('\033[1;37m' + '# {:^59} #\033[0m'.format(message))
    print('\033[1;37m' + '# {:^59} #\033[0m'.format('https://github.com/BerriAI/litellm/issues/new'))
    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
    print()
    print(' Thank you for using LiteLLM! - Krrish & Ishaan')
    print()
    print()

# generate_feedback_box()
import shutil
config_filename = "litellm.secrets.toml"
pkg_config_filename = "template.secrets.toml"
@@ -145,7 +114,6 @@ def run_server(host, port, api_base, model, deploy, debug, temperature, max_toke
        return
    else:
        initialize(model, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params, add_function_to_prompt)
    try:
        import uvicorn
    except:


@@ -1,4 +1,5 @@
import sys, os, platform, appdirs
import sys, os, platform
import threading
import shutil, random, traceback
sys.path.insert(
    0, os.path.abspath("../..")
@@ -8,14 +9,48 @@ try:
    import uvicorn
    import fastapi
    import tomli as tomllib
    import appdirs
except ImportError:
    import subprocess
    import sys

    subprocess.check_call([sys.executable, "-m", "pip", "install", "uvicorn", "fastapi", "tomli"])
    subprocess.check_call([sys.executable, "-m", "pip", "install", "uvicorn", "fastapi", "tomli", "appdirs"])
    import uvicorn
    import fastapi
    import tomli as tomllib
    import appdirs

import random

list_of_messages = [
    "'The thing I wish you improved is...'",
    "'A feature I really want is...'",
    "'The worst thing about this product is...'",
    "'This product would be better if...'",
    "'I don't like how this works...'",
    "'It would help me if you could add...'",
    "'This feature doesn't meet my needs because...'",
    "'I get frustrated when the product...'",
]

def generate_feedback_box():
    box_width = 60

    # Select a random message
    message = random.choice(list_of_messages)

    print()
    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
    print('\033[1;37m' + '# {:^59} #\033[0m'.format(message))
    print('\033[1;37m' + '# {:^59} #\033[0m'.format('https://github.com/BerriAI/litellm/issues/new'))
    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
    print()
    print(' Thank you for using LiteLLM! - Krrish & Ishaan')
    print()
    print()

generate_feedback_box()

print()
print()
@@ -58,7 +93,7 @@ def usage_telemetry(): # helps us know if people are using this feature. Set `li
    data = {
        "feature": "local_proxy_server"
    }
    litellm.utils.litellm_telemetry(data=data)
    threading.Thread(target=litellm.utils.litellm_telemetry, args=(data,)).start()

def load_config():
    try:
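The hunk above also changes behavior: the telemetry ping now runs on a background thread, so a slow or unreachable telemetry endpoint can no longer block proxy startup. A standalone sketch of the fire-and-forget pattern, with send_telemetry as a hypothetical stand-in for litellm.utils.litellm_telemetry:

import threading

def send_telemetry(data):
    # Stand-in for litellm.utils.litellm_telemetry: a network call that
    # may be slow or fail, and must not delay the caller.
    print(f"telemetry sent: {data}")

data = {"feature": "local_proxy_server"}
threading.Thread(target=send_telemetry, args=(data,)).start()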
@@ -105,9 +140,9 @@ def load_config():
            litellm.max_budget = model_config.get("max_budget", None) # check if user set a budget for hosted model - e.g. gpt-4
        print_verbose(f"user_config: {user_config}")
        print_verbose(f"model_config: {model_config}")
        if model_config is None:
            return
        user_model = model_config["model_name"] # raise an error if this isn't set when user runs either `litellm --model local_model` or `litellm --model hosted_model`
        print_verbose(f"user_model: {user_model}")

poetry.lock (generated)

@@ -122,17 +122,6 @@ files = [
[package.dependencies]
frozenlist = ">=1.1.0"
[[package]]
name = "appdirs"
version = "1.4.4"
description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
python-versions = "*"
files = [
    {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
    {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
]
[[package]]
name = "async-timeout"
version = "4.0.3"
@@ -1161,4 +1150,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
content-hash = "97376eb8d05a2d44599b79f93dd0343278484e641d3da6aef9c8c710b35124bc"
content-hash = "f05b5edcd5906842d46553802e96ab0774e46c49fea8c368f8a8427f6f6f08bb"


@@ -14,7 +14,6 @@ tiktoken = ">=0.4.0"
importlib-metadata = ">=6.8.0"
tokenizers = "*"
click = "*"
appdirs = "^1.4.4"
jinja2 = "^3.1.2"
certifi = "^2023.7.22"
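For reference, appdirs (per its description in the removed lock entry) resolves platform-appropriate user directories, which is presumably how the proxy locates litellm.secrets.toml from the first file. A hedged sketch of that lookup; the exact resolution logic is not shown in this diff:

import os
import appdirs

# Resolve a per-user config path; the directory appdirs picks is
# platform-specific (e.g. ~/.config/litellm on Linux).
config_filename = "litellm.secrets.toml"
config_dir = appdirs.user_config_dir("litellm")
print(os.path.join(config_dir, config_filename))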