Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 11:43:54 +00:00
feat(proxy_cli.py): move print statements to show actually deployed port
parent a89c5fcf19
commit 3028d0f622
2 changed files with 30 additions and 25 deletions
@@ -365,6 +365,36 @@ def run_server(
         os.chdir(original_dir)
     if port == 8000 and is_port_in_use(port):
         port = random.randint(1024, 49152)
+    _endpoint_str = f"curl --location 'http://0.0.0.0:{port}/chat/completions' \\"
+    curl_command = (
+        _endpoint_str
+        + """
+    --header 'Content-Type: application/json' \\
+    --data ' {
+    "model": "gpt-3.5-turbo",
+    "messages": [
+        {
+        "role": "user",
+        "content": "what llm are you"
+        }
+    ]
+    }'
+    \n
+    """
+    )
+    print()  # noqa
+    print(  # noqa
+        f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
+    )
+    print(  # noqa
+        f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
+    )
+    print(
+        "\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n"
+    )  # noqa
+    print(  # noqa
+        f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:{port} \033[0m\n"
+    )  # noqa

     # Gunicorn Application Class
     class StandaloneApplication(gunicorn.app.base.BaseApplication):
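The port fallback above calls an is_port_in_use helper that is not part of this diff. A minimal sketch of one way such a check can work, assuming a plain TCP bind probe on 0.0.0.0 (the body below is inferred from the call site, not taken from the repository):

import socket

def is_port_in_use(port: int) -> bool:
    # Try to bind the port; if the OS refuses, another process is already listening on it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.bind(("0.0.0.0", port))
            return False
        except OSError:
            return True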
@@ -1051,31 +1051,6 @@ async def initialize(
         pass
     user_telemetry = telemetry
     usage_telemetry(feature="local_proxy_server")
-    curl_command = """
-    curl --location 'http://0.0.0.0:8000/chat/completions' \\
-    --header 'Content-Type: application/json' \\
-    --data ' {
-    "model": "gpt-3.5-turbo",
-    "messages": [
-        {
-        "role": "user",
-        "content": "what llm are you"
-        }
-    ]
-    }'
-    \n
-    """
-    print()  # noqa
-    print(  # noqa
-        f'\033[1;34mLiteLLM: Test your local proxy with: "litellm --test" This runs an openai.ChatCompletion request to your proxy [In a new terminal tab]\033[0m\n'
-    )
-    print(  # noqa
-        f"\033[1;34mLiteLLM: Curl Command Test for your local proxy\n {curl_command} \033[0m\n"
-    )
-    print("\033[1;34mDocs: https://docs.litellm.ai/docs/simple_proxy\033[0m\n")  # noqa
-    print(  # noqa
-        f"\033[1;34mSee all Router/Swagger docs on http://0.0.0.0:8000 \033[0m\n"
-    )  # noqa


     # for streaming
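Taken together, the two hunks move the user-facing output out of initialize() and into run_server(), after the port fallback, so the printed curl command and Swagger URL use the port the proxy actually binds to rather than a hardcoded 8000. A condensed sketch of that ordering, reusing the names from the diff (is_port_in_use is assumed, as sketched above):

import random

port = 8000
if is_port_in_use(port):  # assumed helper, see the sketch above
    port = random.randint(1024, 49152)

# Build the instructions only after the final port is known, so they
# always point at the endpoint that is really being served.
endpoint = f"http://0.0.0.0:{port}/chat/completions"
print(f"Test your proxy with: curl --location '{endpoint}' ...")
print(f"See all Router/Swagger docs on http://0.0.0.0:{port}")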