"""
|
|
Support for OpenAI's `/v1/chat/completions` endpoint.
|
|
|
|
Calls done in OpenAI/openai.py as TogetherAI is openai-compatible.
|
|
|
|
Docs: https://docs.together.ai/reference/completions-1
|
|
"""
from typing import Optional

from litellm import get_model_info, verbose_logger

from ..openai.chat.gpt_transformation import OpenAIGPTConfig


class TogetherAIConfig(OpenAIGPTConfig):
    def get_supported_openai_params(self, model: str) -> list:
        """
        Only some Together AI models support response_format / tool calling.

        Docs: https://docs.together.ai/docs/json-mode
        """
        supports_function_calling: Optional[bool] = None
        try:
            # Look up the model's capabilities in litellm's model map.
            model_info = get_model_info(model, custom_llm_provider="together_ai")
            supports_function_calling = model_info.get(
                "supports_function_calling", False
            )
        except Exception as e:
            verbose_logger.debug(f"Error getting supported openai params: {e}")

        optional_params = super().get_supported_openai_params(model)
        if supports_function_calling is not True:
            verbose_logger.debug(
                "Only some together models support function calling/response_format. Docs - https://docs.together.ai/docs/function-calling"
            )
            # Prune the function-calling / structured-output params for models
            # that are not known to support them.
            optional_params.remove("tools")
            optional_params.remove("tool_choice")
            optional_params.remove("function_call")
            optional_params.remove("response_format")
        return optional_params

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        mapped_openai_params = super().map_openai_params(
            non_default_params, optional_params, model, drop_params
        )

        # A response_format of {"type": "text"} is the default behavior
        # anyway, so drop it instead of forwarding it to Together AI.
        if mapped_openai_params.get("response_format") == {"type": "text"}:
            mapped_openai_params.pop("response_format")
        return mapped_openai_params
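

# --- Usage sketch (illustrative only, not part of the upstream module) ---
# Assumes this file lives at litellm/llms/together_ai/chat.py; because of the
# relative import above, run it as a module, e.g.
# `python -m litellm.llms.together_ai.chat`. The model id below is
# hypothetical; substitute any Together AI model id.
if __name__ == "__main__":
    config = TogetherAIConfig()
    model = "together_ai/example/model-id"  # hypothetical model id

    # For models without known function-calling support, the tools /
    # tool_choice / function_call / response_format params are pruned.
    print(config.get_supported_openai_params(model=model))

    # A {"type": "text"} response_format never survives mapping: it is
    # either pruned as unsupported or popped as the default.
    print(
        config.map_openai_params(
            non_default_params={"response_format": {"type": "text"}},
            optional_params={},
            model=model,
            drop_params=False,
        )
    )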