fix(proxy_cli.py): only run prisma db push if prisma in environment

parent ae74c52a33
commit 61cbac6b4f

3 changed files with 67 additions and 50 deletions
@@ -343,6 +343,7 @@ def run_server(
         )
     try:
         import uvicorn
+
         if os.name == "nt":
             pass
         else:
@@ -384,57 +385,39 @@ def run_server(
             os.environ["DATABASE_URL"] = database_url

         if os.getenv("DATABASE_URL", None) is not None:
-            # run prisma db push, before starting server
-            # Save the current working directory
-            original_dir = os.getcwd()
-            # set the working directory to where this script is
-            abspath = os.path.abspath(__file__)
-            dname = os.path.dirname(abspath)
-            os.chdir(dname)
             try:
-                subprocess.run(
-                    ["prisma", "db", "push", "--accept-data-loss"]
-                )  # this looks like a weird edge case when prisma just wont start on render. we need to have the --accept-data-loss
-            finally:
-                os.chdir(original_dir)
+                subprocess.run(["prisma"], capture_output=True)
+                is_prisma_runnable = True
+            except FileNotFoundError:
+                is_prisma_runnable = False
+            if is_prisma_runnable:
+                # run prisma db push, before starting server
+                # Save the current working directory
+                original_dir = os.getcwd()
+                # set the working directory to where this script is
+                abspath = os.path.abspath(__file__)
+                dname = os.path.dirname(abspath)
+                os.chdir(dname)
+                try:
+                    subprocess.run(
+                        ["prisma", "db", "push", "--accept-data-loss"]
+                    )  # this looks like a weird edge case when prisma just wont start on render. we need to have the --accept-data-loss
+                finally:
+                    os.chdir(original_dir)
+            else:
+                print(
+                    f"Unable to connect to DB. DATABASE_URL found in environment, but prisma package not found."
+                )
         if port == 8000 and is_port_in_use(port):
             port = random.randint(1024, 49152)
-        _endpoint_str = f"curl --location 'http://0.0.0.0:{port}/chat/completions' \\"
-        curl_command = (
-            _endpoint_str
-            + """
-        --header 'Content-Type: application/json' \\
-        --data ' {
-        "model": "gpt-3.5-turbo",
-        "messages": [
-            {
-            "role": "user",
-            "content": "what llm are you"
-            }
-        ]
-        }'
-        \n
-        """
-        )
-        print()  # noqa
-        print(  # noqa
-            f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
-        )
-        print(  # noqa
-            f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
-        )
-        print(
-            "\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n"
-        )  # noqa
-        print(  # noqa
-            f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:{port} \033[0m\n"
-        )  # noqa

         from litellm.proxy.proxy_server import app

         if os.name == "nt":
             uvicorn.run(app, host=host, port=port)  # run uvicorn
         else:
             import gunicorn.app.base

             # Gunicorn Application Class
             class StandaloneApplication(gunicorn.app.base.BaseApplication):
                 def __init__(self, app, options=None):
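The heart of the fix is in the hunk above: rather than assuming the `prisma` CLI is installed, the new code probes for it first and only runs `prisma db push` when the probe succeeds. `subprocess.run` raises `FileNotFoundError` when the executable is not on PATH, and that is the signal used here. A minimal standalone sketch of the same pattern (the helper name is illustrative, not from the commit):

import subprocess

def prisma_is_runnable() -> bool:
    # subprocess.run raises FileNotFoundError when the "prisma"
    # executable cannot be found on PATH; capture_output keeps the
    # probe from writing to stdout/stderr.
    try:
        subprocess.run(["prisma"], capture_output=True)
        return True
    except FileNotFoundError:
        return False

if prisma_is_runnable():
    # safe to push the schema before starting the server
    subprocess.run(["prisma", "db", "push", "--accept-data-loss"])
else:
    print("DATABASE_URL found in environment, but prisma package not found.")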
@@ -442,6 +425,39 @@ def run_server(
                     self.application = app  # FastAPI app
                     super().__init__()

+                    _endpoint_str = (
+                        f"curl --location 'http://0.0.0.0:{port}/chat/completions' \\"
+                    )
+                    curl_command = (
+                        _endpoint_str
+                        + """
+                    --header 'Content-Type: application/json' \\
+                    --data ' {
+                    "model": "gpt-3.5-turbo",
+                    "messages": [
+                        {
+                        "role": "user",
+                        "content": "what llm are you"
+                        }
+                    ]
+                    }'
+                    \n
+                    """
+                    )
+                    print()  # noqa
+                    print(  # noqa
+                        f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
+                    )
+                    print(  # noqa
+                        f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
+                    )
+                    print(
+                        "\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n"
+                    )  # noqa
+                    print(  # noqa
+                        f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:{port} \033[0m\n"
+                    )  # noqa
+
                 def load_config(self):
                     # note: This Loads the gunicorn config - has nothing to do with LiteLLM Proxy config
                     config = {
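This hunk is the other half of a move: the curl banner deleted from the earlier hunk reappears here, now emitted as part of the gunicorn application setup. For reference, the banner renders roughly as the following request (port shown as 8000 for illustration; the real value is interpolated from `{port}` at runtime):

curl --location 'http://0.0.0.0:8000/chat/completions' \
--header 'Content-Type: application/json' \
--data '{
    "model": "gpt-3.5-turbo",
    "messages": [
        {"role": "user", "content": "what llm are you"}
    ]
}'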
@@ -462,7 +478,9 @@ def run_server(
                 "worker_class": "uvicorn.workers.UvicornWorker",
                 "preload": True,  # Add the preload flag
             }
-            StandaloneApplication(app=app, options=gunicorn_options).run()  # Run gunicorn
+            StandaloneApplication(
+                app=app, options=gunicorn_options
+            ).run()  # Run gunicorn


 if __name__ == "__main__":
@@ -1210,11 +1210,6 @@ async def startup_event():
     # check if master key set in environment - load from there
     master_key = litellm.get_secret("LITELLM_MASTER_KEY", None)

-    ### CONNECT TO DB ###
-    # check if DATABASE_URL in environment - load from there
-    if prisma_client is None:
-        prisma_setup(database_url=os.getenv("DATABASE_URL"))
-
     ### LOAD CONFIG ###
     worker_config = litellm.get_secret("WORKER_CONFIG")
     verbose_proxy_logger.debug(f"worker_config: {worker_config}")
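With this hunk the eager `### CONNECT TO DB ###` block is gone from `startup_event`: the proxy no longer calls `prisma_setup` unconditionally at startup. Judging from the remaining hunks, the connection attempt now surfaces through `PrismaClient` itself, which raises an actionable error when the prisma tooling is missing (see below).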
@@ -259,7 +259,7 @@ class PrismaClient:
         self.proxy_logging_obj = proxy_logging_obj
         try:
             from prisma import Prisma  # type: ignore
-        except:
+        except Exception as e:
             os.environ["DATABASE_URL"] = database_url
             # Save the current working directory
             original_dir = os.getcwd()
@@ -273,6 +273,10 @@ class PrismaClient:
                 subprocess.run(
                     ["prisma", "db", "push", "--accept-data-loss"]
                 )  # this looks like a weird edge case when prisma just wont start on render. we need to have the --accept-data-loss
+            except:
+                raise Exception(
+                    f"Unable to run prisma commands. Run `pip install prisma`"
+                )
             finally:
                 os.chdir(original_dir)
         # Now you can import the Prisma Client
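Taken together, the two `PrismaClient` hunks make the failure mode explicit: if importing `prisma` fails, the client falls back to pushing the schema, and if the CLI itself is unusable it raises with a `pip install prisma` hint instead of failing opaquely. A condensed sketch of the resulting control flow (the helper name and flat structure are illustrative, not the verbatim class):

import os
import subprocess

def ensure_prisma(database_url: str) -> None:
    try:
        from prisma import Prisma  # type: ignore  # fast path: client already generated
    except Exception:
        os.environ["DATABASE_URL"] = database_url
        original_dir = os.getcwd()
        # prisma must run from the directory containing the schema
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        try:
            subprocess.run(["prisma", "db", "push", "--accept-data-loss"])
        except Exception:
            # actionable hint instead of an opaque traceback
            raise Exception("Unable to run prisma commands. Run `pip install prisma`")
        finally:
            # restore the working directory even if the push failed
            os.chdir(original_dir)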