bump: version 0.2.5 → 0.3.0

ishaan-jaff 2023-10-06 15:40:01 -07:00
parent 6f47a2d6f6
commit 4ae8a71aa3
2 changed files with 19 additions and 22 deletions


@@ -1139,26 +1139,20 @@ def batch_completion(
     messages: List = [],
     functions: List = [],
     function_call: str = "", # optional params
-    temperature: float = 1,
-    top_p: float = 1,
-    n: int = 1,
-    stream: bool = False,
+    temperature: Optional[float] = None,
+    top_p: Optional[float] = None,
+    n: Optional[int] = None,
+    stream: Optional[bool] = None,
     stop=None,
-    max_tokens: float = float("inf"),
-    presence_penalty: float = 0,
-    frequency_penalty=0,
+    max_tokens: Optional[float] = None,
+    presence_penalty: Optional[float] = None,
+    frequency_penalty: Optional[float]=None,
     logit_bias: dict = {},
     user: str = "",
+    deployment_id = None,
+    request_timeout: Optional[int] = None,
     # Optional liteLLM function params
-    *,
-    return_async=False,
-    api_key: Optional[str] = None,
-    api_version: Optional[str] = None,
-    api_base: Optional[str] = None,
-    force_timeout=600,
-    # used by text-bison only
-    top_k=40,
-    custom_llm_provider=None,):
+    **kwargs):
     args = locals()
     batch_messages = messages
     completions = []
@@ -1183,10 +1177,10 @@ def batch_completion(
             user=user,
             # params to identify the model
             model=model,
-            custom_llm_provider=custom_llm_provider,
-            top_k=top_k,
+            custom_llm_provider=custom_llm_provider
         )
         results = vllm.batch_completions(model=model, messages=batch_messages, custom_prompt_dict=litellm.custom_prompt_dict, optional_params=optional_params)
+    # all non VLLM models for batch completion models
     else:
         def chunks(lst, n):
             """Yield successive n-sized chunks from lst."""
@@ -1195,9 +1189,12 @@ def batch_completion(
         with ThreadPoolExecutor(max_workers=100) as executor:
             for sub_batch in chunks(batch_messages, 100):
                 for message_list in sub_batch:
-                    kwargs_modified = args
+                    kwargs_modified = args.copy()
                     kwargs_modified["messages"] = message_list
-                    future = executor.submit(completion, **kwargs_modified)
+                    original_kwargs = {}
+                    if "kwargs" in kwargs_modified:
+                        original_kwargs = kwargs_modified.pop("kwargs")
+                    future = executor.submit(completion, **kwargs_modified, **original_kwargs)
                     completions.append(future)
         # Retrieve the results from the futures
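
For context, here is a minimal usage sketch (not part of this commit) of how a caller might exercise the reworked batch_completion signature: the explicit OpenAI params now default to None, request_timeout is accepted directly, per-request kwargs are copied with args.copy() so threads no longer share one mutable dict, and any extra provider-specific arguments flow through **kwargs into each threaded completion() call. The model name, prompts, and metadata kwarg below are illustrative assumptions, not values taken from this diff.

import litellm

prompts = ["What is the capital of France?", "Summarize TCP in one sentence."]

# Each element of `messages` is its own chat history; batch_completion fans them
# out across a ThreadPoolExecutor and returns one response per element.
responses = litellm.batch_completion(
    model="gpt-3.5-turbo",        # illustrative model name
    messages=[[{"role": "user", "content": p}] for p in prompts],
    max_tokens=64,                # unset optional params now stay None instead of fake defaults
    request_timeout=30,           # new explicit parameter added in this commit
    metadata={"batch": "demo"},   # example extra kwarg, forwarded via **kwargs to completion()
)

for response in responses:
    # Responses follow the OpenAI chat-completion shape.
    print(response["choices"][0]["message"]["content"])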


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.2.6"
+version = "0.3.0"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"
@@ -25,7 +25,7 @@ requires = ["poetry-core"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.commitizen]
-version = "0.2.6"
+version = "0.3.0"
 version_files = [
     "pyproject.toml:^version"
 ]