Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-24 18:24:20 +00:00

commit 21ae940992 (parent 2e72efddd6)
bump: version 0.13.1 → 0.13.2.dev1

6 changed files with 29 additions and 19 deletions

@@ -383,4 +383,3 @@ from .exceptions import (
 from .budget_manager import BudgetManager
 from .proxy.proxy_cli import run_server
 from .router import Router
-from .proxy.proxy_server import app
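
This removal pairs with the proxy_cli change in the next hunk: once the server is launched via the "litellm.proxy.proxy_server:app" import string, nothing needs a module-level app on the litellm package, so importing litellm no longer drags in the FastAPI proxy server. A quick way to verify, assuming the package layout implied by the hunks (the module path litellm.proxy.proxy_server is taken from the diff itself):

import sys

import litellm  # noqa: F401

# With the module-level `app` import removed, the proxy server module
# should only be loaded once the proxy is actually started.
# Expected: False after this change (unless some other import pulls it in).
print("litellm.proxy.proxy_server" in sys.modules)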

@@ -177,7 +177,7 @@ def run_server(host, port, api_base, api_version, model, alias, add_key, headers
     if port == 8000 and is_port_in_use(port):
         port = random.randint(1024, 49152)
     print(os.listdir(os.getcwd()))
-    uvicorn.run("litellm:app", host=host, port=port, workers=num_workers)
+    uvicorn.run("litellm.proxy.proxy_server:app", host=host, port=port, workers=num_workers)


 if __name__ == "__main__":
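
The uvicorn change here is more than a rename: with workers=num_workers greater than one, uvicorn requires the application as an import string ("module.path:app") so each worker process can import the app independently, and "litellm:app" stops resolving once the import in the first hunk is removed. A minimal sketch of the surrounding logic; is_port_in_use is not shown in this diff, so the socket-based version below is an assumed stand-in:

import random
import socket

import uvicorn


def is_port_in_use(port: int) -> bool:
    # Assumed stand-in for the helper referenced in the diff: if a TCP
    # connection to the port succeeds, something is already listening.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("127.0.0.1", port)) == 0


port = 8000
if is_port_in_use(port):
    port = random.randint(1024, 49152)

# The import string lets each worker process import the app itself;
# uvicorn rejects an app *object* when workers > 1 (or reload is on).
uvicorn.run("litellm.proxy.proxy_server:app", host="0.0.0.0", port=port, workers=4)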

@@ -74,17 +74,13 @@ def generate_feedback_box():
     print(" Thank you for using LiteLLM! - Krrish & Ishaan")
     print()
     print()
-
-
-generate_feedback_box()
-
-print()
-print(
-    "\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m"
-)
-print()
-print("\033[1;34mDocs: https://docs.litellm.ai/docs/proxy_server\033[0m")
-print()
+print()
+print(
+    "\033[1;31mGive Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new\033[0m"
+)
+print()
+print("\033[1;34mDocs: https://docs.litellm.ai/docs/proxy_server\033[0m")
+print()

 import litellm
 litellm.suppress_debug_info = True
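
This hunk deduplicates the startup banner: the old module-level generate_feedback_box() call and the first copy of the print block are dropped, leaving a single copy (the call itself reappears inside initialize() in the next hunk). The \033[...m sequences are ANSI SGR escape codes: 1;31 is bold red, 1;34 is bold blue, and 0 resets styling. For example:

# ANSI SGR escape codes: \033[1;31m = bold red, \033[1;34m = bold blue,
# \033[0m = reset to default styling.
print("\033[1;31mGive Feedback / Get Help\033[0m")
print("\033[1;34mDocs\033[0m")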

@@ -360,6 +356,7 @@ def initialize(
     config
 ):
     global user_model, user_api_base, user_debug, user_max_tokens, user_request_timeout, user_temperature, user_telemetry, user_headers, llm_model_list, llm_router, server_settings
+    generate_feedback_box()
     user_model = model
     user_debug = debug
     dynamic_config = {"general": {}, user_model: {}}

@@ -37,5 +37,19 @@
             },
             "log_event_type": "pre_api_call"
         }
-    }
-}
+    },
+    "20231105211050527183": {
+        "pre_api_call": {
+            "model": "togethercomputer/llama-2-70b-chat",
+            "messages": [
+                {
+                    "content": "Write a short poem about the sky",
+                    "role": "user"
+                }
+            ],
+            "optional_params": {},
+            "litellm_params": {
+                "return_async": false,
+                "api_key": null,
+                "force_timeout": 600,
+                "logger_fn":
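
The added entry follows the log's existing shape: a timestamp-style request id mapping to a pre_api_call record with the model, the messages, and the resolved litellm_params (the value after "logger_fn": is cut off in this view). A hedged sketch of walking such a log; the filename api_log.json is an assumption, since the diff view does not name the file:

import json

# Filename assumed for illustration; the diff does not show which file this is.
with open("api_log.json") as f:
    log = json.load(f)

# Each top-level key is a timestamp-style request id.
for request_id, events in log.items():
    pre = events.get("pre_api_call", {})
    print(request_id, pre.get("model"))
    for message in pre.get("messages", []):
        print("  ", message["role"], "->", message["content"])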

@@ -36,7 +36,7 @@ def test_completion_custom_provider_model_name():
         pytest.fail(f"Error occurred: {e}")


-# test_completion_custom_provider_model_name()
+test_completion_custom_provider_model_name()


 def test_completion_claude():
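
Uncommenting the module-level call means the test now runs whenever the file is imported, including during pytest collection, not just when executed directly; toggling these calls is how this test file is driven ad hoc. If only direct runs are wanted, a __main__ guard would avoid the import-time side effect (a sketch, not the repo's convention):

# Sketch: run the test on direct execution only, so importing the module
# (e.g. during pytest collection) does not trigger the API call.
if __name__ == "__main__":
    test_completion_custom_provider_model_name()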

@@ -1384,4 +1384,4 @@ def test_moderation():
     print(output)
     return output

-# test_moderation()
+# test_moderation()

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.13.1"
+version = "0.13.2.dev1"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"

@@ -26,7 +26,7 @@ requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"

 [tool.commitizen]
-version = "0.13.1"
+version = "0.13.2.dev1"
 version_files = [
     "pyproject.toml:^version"
 ]
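
The [tool.commitizen] table in pyproject.toml is presumably what produced this commit: cz bump derives the next version from conventional-commit history, rewrites every file:regex entry in version_files (here, lines matching ^version in pyproject.toml, which covers both the [tool.poetry] and [tool.commitizen] version lines), and commits with the bump: message seen in the title. A rough illustration of that line-wise regex substitution, assuming a simple replace (commitizen's internals may differ):

import re

# Illustration only: emulate a "pyproject.toml:^version" version_files entry
# by replacing the version on lines whose start matches the regex.
old, new = "0.13.1", "0.13.2.dev1"
text = '[tool.poetry]\nname = "litellm"\nversion = "0.13.1"\n'

bumped = "\n".join(
    line.replace(old, new) if re.match(r"^version", line) else line
    for line in text.splitlines()
)
print(bumped)  # the version line now reads: version = "0.13.2.dev1"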