mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 03:34:10 +00:00)
fixes to get optional params

parent 0ae93d1112
commit 8c48af11c2
9 changed files with 86 additions and 90 deletions
Binary file not shown.
Binary file not shown.
@@ -56,48 +56,44 @@ def completion(
     ## COMPLETION CALL
     response = palm.chat(messages=prompt)
 
     ## LOGGING
     logging_obj.post_call(
         input=prompt,
         api_key="",
         original_response=response,
         additional_args={"complete_input_dict": {}},
     )
     print_verbose(f"raw model_response: {response}")
     ## RESPONSE OBJECT
     completion_response = response.last
 
-    if "stream" in optional_params and optional_params["stream"] == True:
-        return response.iter_lines()
     if "error" in completion_response:
         raise PalmError(
             message=completion_response["error"],
             status_code=response.status_code,
         )
     else:
         try:
             model_response["choices"][0]["message"]["content"] = completion_response
         except:
             raise PalmError(message=json.dumps(completion_response), status_code=response.status_code)
 
     ## CALCULATING USAGE - baseten charges on time, not tokens - have some mapping of cost here.
     prompt_tokens = len(
         encoding.encode(prompt)
     )
     completion_tokens = len(
         encoding.encode(model_response["choices"][0]["message"]["content"])
     )
 
     model_response["created"] = time.time()
     model_response["model"] = "palm/" + model
     model_response["usage"] = {
         "prompt_tokens": prompt_tokens,
         "completion_tokens": completion_tokens,
         "total_tokens": prompt_tokens + completion_tokens,
     }
     return model_response
 
 def embedding():
     # logic for parsing in - calling - parsing out model embedding calls
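For reference, the handler above flattens the PaLM reply into an OpenAI-style response dict plus token usage. A minimal standalone sketch of that mapping, with count_tokens as an invented stand-in for the encoding.encode tokenizer used in the real code:

    import time

    def count_tokens(text: str) -> int:
        # crude whitespace count; placeholder for encoding.encode(...)
        return len(text.split())

    def build_model_response(model: str, prompt: str, completion_text: str) -> dict:
        prompt_tokens = count_tokens(prompt)
        completion_tokens = count_tokens(completion_text)
        return {
            "choices": [{"message": {"role": "assistant", "content": completion_text}}],
            "created": time.time(),
            "model": "palm/" + model,
            "usage": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
            },
        }

    # build_model_response("chat-bison", "Hi there", "Hello! How can I help?")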
@@ -6,6 +6,7 @@ import time
 from typing import Callable
 from litellm.utils import ModelResponse, get_secret
 import sys
+from copy import deepcopy
 
 class SagemakerError(Exception):
     def __init__(self, status_code, message):
@@ -60,9 +61,12 @@ def completion(
         )
     else:
         prompt += f"{message['content']}"
+    # pop streaming if it's in the optional params as 'stream' raises an error with sagemaker
+    inference_params = deepcopy(optional_params)
+    inference_params.pop("stream", None)
     data = {
         "inputs": prompt,
-        "parameters": optional_params
+        "parameters": inference_params
     }
 
     ## LOGGING
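The copy-then-pop pattern above strips "stream" from the SageMaker payload without mutating the caller's optional_params. A self-contained illustration (the parameter values are made up):

    from copy import deepcopy

    optional_params = {"temperature": 0.7, "max_new_tokens": 256, "stream": True}

    inference_params = deepcopy(optional_params)
    inference_params.pop("stream", None)  # None default: no KeyError if absent

    assert "stream" in optional_params       # caller's dict is untouched
    assert "stream" not in inference_params  # endpoint never sees it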
@@ -79,46 +83,43 @@ def completion(
         CustomAttributes="accept_eula=true",
     )
     response = response["Body"].read().decode("utf8")
-    if "stream" in optional_params and optional_params["stream"] == True:
-        return response.iter_lines()
     ## LOGGING
     logging_obj.post_call(
         input=prompt,
         api_key="",
         original_response=response,
         additional_args={"complete_input_dict": data},
     )
     print_verbose(f"raw model_response: {response}")
     ## RESPONSE OBJECT
     completion_response = json.loads(response)
     if "error" in completion_response:
         raise SagemakerError(
             message=completion_response["error"],
             status_code=response.status_code,
         )
     else:
         try:
             model_response["choices"][0]["message"]["content"] = completion_response[0]["generation"]
         except:
             raise SagemakerError(message=json.dumps(completion_response), status_code=response.status_code)
 
     ## CALCULATING USAGE - baseten charges on time, not tokens - have some mapping of cost here.
     prompt_tokens = len(
         encoding.encode(prompt)
     )
     completion_tokens = len(
         encoding.encode(model_response["choices"][0]["message"]["content"])
     )
 
     model_response["created"] = time.time()
     model_response["model"] = model
     model_response["usage"] = {
         "prompt_tokens": prompt_tokens,
         "completion_tokens": completion_tokens,
         "total_tokens": prompt_tokens + completion_tokens,
     }
     return model_response
 
 def embedding():
     # logic for parsing in - calling - parsing out model embedding calls
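The call being parsed here is a SageMaker runtime invocation; the accept_eula=true attribute in the context lines matches the Llama 2 endpoints. A hedged sketch of such a call with boto3 (region, endpoint name, and payload are placeholders, not values from the repo):

    import json
    import boto3

    client = boto3.client("sagemaker-runtime", region_name="us-west-2")
    payload = {"inputs": "Hello", "parameters": {"max_new_tokens": 64}}

    response = client.invoke_endpoint(
        EndpointName="my-llama2-endpoint",  # placeholder name
        ContentType="application/json",
        Body=json.dumps(payload),
        CustomAttributes="accept_eula=true",
    )
    body = response["Body"].read().decode("utf8")
    completion = json.loads(body)  # Llama 2 returns e.g. [{"generation": "..."}]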
@@ -977,7 +977,6 @@ def completion(
             encoding=encoding,
             logging_obj=logging
         )
-
         if "stream" in optional_params and optional_params["stream"]==True: ## [BETA]
             # sagemaker does not support streaming as of now so we're faking streaming:
             # https://discuss.huggingface.co/t/streaming-output-text-when-deploying-on-sagemaker/39611
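Because SageMaker returned only complete responses at the time, "faking streaming" amounts to slicing a finished completion through a generator. One minimal way to do it (the function name and chunk size are invented, not litellm's implementation):

    def fake_stream(text: str, chunk_size: int = 20):
        # yield OpenAI-style delta chunks from an already-complete response
        for i in range(0, len(text), chunk_size):
            yield {"choices": [{"delta": {"content": text[i:i + chunk_size]}}]}

    # for chunk in fake_stream("a long completion ..."):
    #     print(chunk["choices"][0]["delta"]["content"], end="")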
@@ -926,7 +926,7 @@ def test_completion_with_fallbacks():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
-test_completion_with_fallbacks()
+# test_completion_with_fallbacks()
 
 # def test_completion_with_fallbacks_multiple_keys():
 #     print(f"backup key 1: {os.getenv('BACKUP_OPENAI_API_KEY_1')}")
 #     print(f"backup key 2: {os.getenv('BACKUP_OPENAI_API_KEY_2')}")
@@ -709,7 +709,7 @@ def test_completion_sagemaker_stream():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
-# test_completion_sagemaker_stream()
+test_completion_sagemaker_stream()
 
 # test on openai completion call
 def test_openai_text_completion_call():
@@ -977,9 +977,9 @@ def get_optional_params( # use the openai defaults
         raise ValueError("LiteLLM.Exception: Function calling is not supported by this provider")
 
     def _check_valid_arg(supported_params):
-        print(f"checking params for {model}")
-        print(f"params passed in {passed_params}")
-        print(f"non-default params passed in {non_default_params}")
+        print_verbose(f"checking params for {model}")
+        print_verbose(f"params passed in {passed_params}")
+        print_verbose(f"non-default params passed in {non_default_params}")
         unsupported_params = [k for k in non_default_params.keys() if k not in supported_params]
         if unsupported_params:
             raise ValueError("LiteLLM.Exception: Unsupported parameters passed: {}".format(', '.join(unsupported_params)))
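The helper's core check is a membership filter over the non-default params. Standalone version of the same logic (the example arguments are illustrative):

    def check_valid_args(non_default_params: dict, supported_params: list) -> None:
        unsupported = [k for k in non_default_params if k not in supported_params]
        if unsupported:
            raise ValueError(
                "LiteLLM.Exception: Unsupported parameters passed: {}".format(", ".join(unsupported))
            )

    check_valid_args({"temperature": 0.2}, ["temperature", "max_tokens"])  # ok
    # check_valid_args({"top_k": 40}, ["temperature", "max_tokens"])       # raises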
@@ -1225,7 +1225,6 @@ def get_optional_params( # use the openai defaults
     for k in passed_params.keys():
         if k not in default_params.keys():
             optional_params[k] = passed_params[k]
-    print(f"final params going to model: {optional_params}")
     return optional_params
 
 def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
@@ -3441,14 +3440,15 @@ def completion_with_split_tests(models={}, messages=[], use_client=False, overri
 
 def completion_with_fallbacks(**kwargs):
-    print(f"kwargs inside completion_with_fallbacks: {kwargs}")
-    nested_kwargs = kwargs.pop("kwargs")
+    nested_kwargs = kwargs.pop("kwargs", {})
     response = None
     rate_limited_models = set()
     model_expiration_times = {}
     start_time = time.time()
     original_model = kwargs["model"]
-    fallbacks = [kwargs["model"]] + nested_kwargs["fallbacks"]
-    del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive
+    fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", [])
+    if "fallbacks" in nested_kwargs:
+        del nested_kwargs["fallbacks"] # remove fallbacks so it's not recursive
 
     while response == None and time.time() - start_time < 45:
         for model in fallbacks:
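The defaulted lookups mean a call with no nested kwargs, or no fallbacks list, no longer raises KeyError. A condensed sketch of the new behavior (pop with a default is equivalent to the diff's if ... del guard):

    def fallback_list(kwargs: dict) -> list:
        nested_kwargs = kwargs.pop("kwargs", {})
        fallbacks = [kwargs["model"]] + nested_kwargs.get("fallbacks", [])
        nested_kwargs.pop("fallbacks", None)  # drop so the retry loop can't recurse
        return fallbacks

    print(fallback_list({"model": "gpt-3.5-turbo"}))
    # -> ['gpt-3.5-turbo']
    print(fallback_list({"model": "gpt-4", "kwargs": {"fallbacks": ["claude-instant-1"]}}))
    # -> ['gpt-4', 'claude-instant-1']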
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.811"
+version = "0.1.812"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"