Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
Litellm merge pr (#7161)

* build: merge branch
* test: fix openai naming
* fix(main.py): fix openai renaming
* style: ignore function length for config factory
* fix(sagemaker/): fix routing logic
* fix: fix imports
* fix: fix override
parent d5aae81c6d · commit 350cfc36f7

88 changed files with 3617 additions and 4421 deletions
@@ -75,6 +75,7 @@ class OpenAILikeChatConfig(OpenAIGPTConfig):
         custom_llm_provider: str,
+        base_model: Optional[str],
     ) -> ModelResponse:
         print(f"response: {response}")
         response_json = response.json()
         logging_obj.post_call(
             input=messages,
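For context on this first hunk: the new base_model parameter lets the response transform record which underlying model sits behind an OpenAI-compatible deployment, presumably so downstream routing/cost logic sees the true model rather than the deployment alias. Below is a minimal, self-contained sketch of that pattern; the ModelResponse class here is a stand-in for litellm's real response type, and tag_base_model is a hypothetical helper name, not litellm's API.

from typing import Optional


class ModelResponse:
    # Stand-in for litellm's ModelResponse type (illustration only).
    def __init__(self, model: str):
        self.model = model
        self._hidden_params: dict = {}


def tag_base_model(response: ModelResponse, base_model: Optional[str]) -> ModelResponse:
    # Mirrors the behavior added in the diff: when a base_model override
    # is provided, stash it in _hidden_params so later consumers can
    # recover the underlying model name.
    if base_model is not None:
        response._hidden_params["model"] = base_model
    return response


resp = tag_base_model(ModelResponse(model="my-deployment-alias"), base_model="gpt-4o")
assert resp._hidden_params["model"] == "gpt-4o"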
@@ -99,3 +100,25 @@ class OpenAILikeChatConfig(OpenAIGPTConfig):
         if base_model is not None:
             returned_response._hidden_params["model"] = base_model
         return returned_response
+
+    def map_openai_params(
+        self,
+        non_default_params: dict,
+        optional_params: dict,
+        model: str,
+        drop_params: bool,
+        replace_max_completion_tokens_with_max_tokens: bool = True,
+    ) -> dict:
+        mapped_params = super().map_openai_params(
+            non_default_params, optional_params, model, drop_params
+        )
+        if (
+            "max_completion_tokens" in non_default_params
+            and replace_max_completion_tokens_with_max_tokens
+        ):
+            mapped_params["max_tokens"] = non_default_params[
+                "max_completion_tokens"
+            ]  # most openai-compatible providers support 'max_tokens' not 'max_completion_tokens'
+            mapped_params.pop("max_completion_tokens", None)
+
+        return mapped_params
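The second hunk's new override remaps the newer 'max_completion_tokens' parameter onto the widely supported 'max_tokens' for OpenAI-like providers. The sketch below re-implements just that remapping rule as a standalone function for illustration; remap_max_completion_tokens is a hypothetical name, and the real method additionally defers to OpenAIGPTConfig.map_openai_params for every other parameter before applying it.

def remap_max_completion_tokens(non_default_params: dict,
                                mapped_params: dict,
                                replace: bool = True) -> dict:
    # If the caller set max_completion_tokens and remapping is enabled,
    # rewrite it to max_tokens, since most OpenAI-compatible providers
    # only understand the older name.
    if "max_completion_tokens" in non_default_params and replace:
        mapped_params["max_tokens"] = non_default_params["max_completion_tokens"]
        mapped_params.pop("max_completion_tokens", None)
    return mapped_params


params = remap_max_completion_tokens(
    {"max_completion_tokens": 256},
    {"max_completion_tokens": 256, "temperature": 0.2},
)
assert params == {"max_tokens": 256, "temperature": 0.2}

Keeping the behavior behind the replace_max_completion_tokens_with_max_tokens flag (defaulting to True) lets subclasses for providers that do accept 'max_completion_tokens' opt out without reimplementing the method.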