forked from phoenix/litellm-mirror

fix(proxy_server): import errors

commit d0ec844629 (parent 3415471390)
4 changed files with 41 additions and 50 deletions
litellm/proxy/proxy_cli.py

@@ -5,38 +5,7 @@ from dotenv import load_dotenv
 load_dotenv()
 from importlib import resources
-import shutil, random
+import shutil
-list_of_messages = [
-    "'The thing I wish you improved is...'",
-    "'A feature I really want is...'",
-    "'The worst thing about this product is...'",
-    "'This product would be better if...'",
-    "'I don't like how this works...'",
-    "'It would help me if you could add...'",
-    "'This feature doesn't meet my needs because...'",
-    "'I get frustrated when the product...'",
-]
-
-def generate_feedback_box():
-    box_width = 60
-
-    # Select a random message
-    message = random.choice(list_of_messages)
-
-    print()
-    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
-    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
-    print('\033[1;37m' + '# {:^59} #\033[0m'.format(message))
-    print('\033[1;37m' + '# {:^59} #\033[0m'.format('https://github.com/BerriAI/litellm/issues/new'))
-    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
-    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
-    print()
-    print(' Thank you for using LiteLLM! - Krrish & Ishaan')
-    print()
-    print()
-
-# generate_feedback_box()
-
-
 config_filename = "litellm.secrets.toml"
 pkg_config_filename = "template.secrets.toml"

@@ -145,7 +114,6 @@ def run_server(host, port, api_base, model, deploy, debug, temperature, max_toke
         return
     else:
         initialize(model, api_base, debug, temperature, max_tokens, max_budget, telemetry, drop_params, add_function_to_prompt)
 
     try:
         import uvicorn
     except:
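Reviewer note: everything removed above reappears verbatim in litellm/proxy/proxy_server.py below, which is why the CLI module can also drop `random` from its imports. For readers unfamiliar with the escape sequences involved, here is a minimal standalone sketch of the same box-drawing technique; the `print_box` helper name is hypothetical, not part of this commit:

    # '\033[1;37m' turns on bold bright-white text; '\033[0m' resets styling.
    # '{:^59}' centers its argument in a field 59 characters wide.
    def print_box(message: str, box_width: int = 60) -> None:
        print('\033[1;37m' + '#' + '-' * box_width + '#\033[0m')   # top border
        print('\033[1;37m' + '# {:^59} #\033[0m'.format(message))  # centered row
        print('\033[1;37m' + '#' + '-' * box_width + '#\033[0m')   # bottom border

    print_box("'A feature I really want is...'")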
litellm/proxy/proxy_server.py

@@ -1,4 +1,5 @@
-import sys, os, platform, appdirs
+import sys, os, platform
+import threading
 import shutil, random, traceback
 sys.path.insert(
     0, os.path.abspath("../..")

@@ -8,14 +9,48 @@ try:
     import uvicorn
     import fastapi
     import tomli as tomllib
+    import appdirs
 except ImportError:
     import subprocess
     import sys
 
-    subprocess.check_call([sys.executable, "-m", "pip", "install", "uvicorn", "fastapi", "tomli"])
+    subprocess.check_call([sys.executable, "-m", "pip", "install", "uvicorn", "fastapi", "tomli", "appdirs"])
     import uvicorn
     import fastapi
     import tomli as tomllib
+    import appdirs
 
+import random
+list_of_messages = [
+    "'The thing I wish you improved is...'",
+    "'A feature I really want is...'",
+    "'The worst thing about this product is...'",
+    "'This product would be better if...'",
+    "'I don't like how this works...'",
+    "'It would help me if you could add...'",
+    "'This feature doesn't meet my needs because...'",
+    "'I get frustrated when the product...'",
+]
+
+def generate_feedback_box():
+    box_width = 60
+
+    # Select a random message
+    message = random.choice(list_of_messages)
+
+    print()
+    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
+    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
+    print('\033[1;37m' + '# {:^59} #\033[0m'.format(message))
+    print('\033[1;37m' + '# {:^59} #\033[0m'.format('https://github.com/BerriAI/litellm/issues/new'))
+    print('\033[1;37m' + '#' + ' '*box_width + '#\033[0m')
+    print('\033[1;37m' + '#' + '-'*box_width + '#\033[0m')
+    print()
+    print(' Thank you for using LiteLLM! - Krrish & Ishaan')
+    print()
+    print()
+
+generate_feedback_box()
+
 print()
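This hunk is the import fix named in the commit title: `appdirs` moves out of the unconditional top-level import and into the existing try/except block, so a missing package is pip-installed on first run instead of raising ImportError before the server can start. A self-contained sketch of that lazy-install pattern, reduced to the one package this commit touches:

    import subprocess
    import sys

    # Attempt the optional import; on failure, install the package into the
    # running interpreter's environment, then import it again.
    try:
        import appdirs
    except ImportError:
        subprocess.check_call([sys.executable, "-m", "pip", "install", "appdirs"])
        import appdirs

    # appdirs resolves platform-appropriate paths, e.g. ~/.config/litellm on Linux.
    print(appdirs.user_config_dir("litellm"))

Because check_call raises CalledProcessError when pip fails, a broken network still surfaces as an error rather than a silent retry loop.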
@@ -58,7 +93,7 @@ def usage_telemetry(): # helps us know if people are using this feature. Set `li
     data = {
         "feature": "local_proxy_server"
     }
-    litellm.utils.litellm_telemetry(data=data)
+    threading.Thread(target=litellm.utils.litellm_telemetry, args=(data,)).start()
 
 def load_config():
     try:

@@ -105,9 +140,9 @@ def load_config():
         litellm.max_budget = model_config.get("max_budget", None) # check if user set a budget for hosted model - e.g. gpt-4
 
         print_verbose(f"user_config: {user_config}")
+        print_verbose(f"model_config: {model_config}")
         if model_config is None:
             return
 
         user_model = model_config["model_name"] # raise an error if this isn't set when user runs either `litellm --model local_model` or `litellm --model hosted_model`
         print_verbose(f"user_model: {user_model}")
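The other behavioral change in this file: `usage_telemetry` no longer calls `litellm.utils.litellm_telemetry` inline, where a slow network round-trip would block proxy startup, but hands it to a background thread. A minimal sketch of the same fire-and-forget pattern; `send_telemetry` here is a stand-in stub, not the real litellm function:

    import threading
    import time

    def send_telemetry(data: dict) -> None:
        time.sleep(2)  # stand-in for a slow network call
        print(f"telemetry sent: {data}")

    data = {"feature": "local_proxy_server"}
    # .start() returns immediately; the request completes in the background.
    threading.Thread(target=send_telemetry, args=(data,)).start()
    print("startup continues without waiting")

One caveat: the thread is created without daemon=True, so an interpreter that reaches the end of the main program still waits for the telemetry call to finish before exiting.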
poetry.lock (generated): 13 changed lines

@@ -122,17 +122,6 @@ files = [
 [package.dependencies]
 frozenlist = ">=1.1.0"
 
-[[package]]
-name = "appdirs"
-version = "1.4.4"
-description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
-optional = false
-python-versions = "*"
-files = [
-    {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
-    {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
-]
-
 [[package]]
 name = "async-timeout"
 version = "4.0.3"

@@ -1161,4 +1150,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8"
-content-hash = "97376eb8d05a2d44599b79f93dd0343278484e641d3da6aef9c8c710b35124bc"
+content-hash = "f05b5edcd5906842d46553802e96ab0774e46c49fea8c368f8a8427f6f6f08bb"
|
@ -14,7 +14,6 @@ tiktoken = ">=0.4.0"
|
||||||
importlib-metadata = ">=6.8.0"
|
importlib-metadata = ">=6.8.0"
|
||||||
tokenizers = "*"
|
tokenizers = "*"
|
||||||
click = "*"
|
click = "*"
|
||||||
appdirs = "^1.4.4"
|
|
||||||
jinja2 = "^3.1.2"
|
jinja2 = "^3.1.2"
|
||||||
certifi = "^2023.7.22"
|
certifi = "^2023.7.22"
|
||||||
|
|
||||||
|
|
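With the package now fetched at runtime, `appdirs` drops out of the declared dependencies, and the poetry.lock hunk above is the mechanical consequence: the appdirs package entry disappears and the content-hash, which poetry derives from pyproject.toml's dependency list, is regenerated. If installing packages from inside an import handler ever feels too aggressive, a gentler alternative is to probe for the module and fail with instructions; a sketch using the standard library's importlib (a suggestion, not what this commit does):

    import importlib.util

    # Check for the optional dependency without importing or installing it.
    if importlib.util.find_spec("appdirs") is None:
        raise RuntimeError("missing dependency 'appdirs'; run: pip install appdirs")

    import appdirs  # safe: the spec lookup confirmed the package is present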