Litellm dev 12 28 2024 p2 (#7458)

* docs(sidebar.js): add docs for supporting model access groups for wildcard routes

* feat(key_management_endpoints.py): add check that the user is a premium_user when adding a model access group for a wildcard route
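
For context, a hedged sketch of the gated flow. /key/generate is the proxy's key-creation endpoint; the URL, master key, and access-group name are illustrative placeholders, not from this commit:

# Illustrative only: tie a new key to an access group that maps to a
# wildcard route (e.g. "openai/*"). After this change, the proxy rejects
# this request for non-premium users.
import requests

resp = requests.post(
    "http://localhost:4000/key/generate",
    headers={"Authorization": "Bearer sk-1234"},
    json={"models": ["my-wildcard-access-group"]},
)
print(resp.status_code, resp.json())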

* refactor(docs/): make 'control model access' a root-level doc in the proxy sidebar

makes it easier to discover how to control model access on litellm

* docs: more cleanup

* feat(fireworks_ai/): add document inlining support

Enables users to call non-vision models with images, PDFs, etc.
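
A minimal usage sketch of what this enables. The model name and document URL are placeholders; the mechanism is Fireworks' document inlining, which litellm triggers by appending "#transform=inline" to the document URL:

import litellm

# A non-vision Fireworks AI model receiving a PDF; litellm auto-adds the
# "#transform=inline" suffix to the url before sending the request.
response = litellm.completion(
    model="fireworks_ai/accounts/fireworks/models/llama-v3p3-70b-instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Summarize this document."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://example.com/report.pdf"},
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)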

* test(test_fireworks_ai_translation.py): add unit tests for the fireworks ai transform-inline helper util

* docs(docs/): add document inlining details to fireworks ai docs

* feat(fireworks_ai/): allow users to dynamically disable auto-adding the transform-inline block

allows client-side disabling of this feature for proxy users
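
A sketch of the opt-out, assuming the flag is passed as a per-request kwarg that flows into get_litellm_params (see the diff below):

import litellm

# disable_add_transform_inline_image_block is threaded through
# get_litellm_params; True leaves document URLs untouched for this call.
response = litellm.completion(
    model="fireworks_ai/accounts/fireworks/models/llama-v3p3-70b-instruct",
    messages=[{"role": "user", "content": "hello"}],
    disable_add_transform_inline_image_block=True,
)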

* feat(fireworks_ai/): return 'supports_vision' and 'supports_pdf_input' as true for all fireworks ai models

these are now true since fireworks ai supports document inlining
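
A quick capability check; this assumes supports_pdf_input is exposed alongside litellm.supports_vision, and the model name is a placeholder:

import litellm

model = "fireworks_ai/accounts/fireworks/models/llama-v3p3-70b-instruct"
print(litellm.supports_vision(model=model))     # expected: True
print(litellm.supports_pdf_input(model=model))  # expected: True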

* test: fix tests

* fix(router.py): add unit tests for _is_model_access_group_for_wildcard_route
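
A hedged test sketch. The Router model_list shape (wildcard model_name plus model_info.access_groups) is standard litellm config, but the helper's exact signature is my assumption:

from litellm import Router

router = Router(
    model_list=[
        {
            "model_name": "openai/*",
            "litellm_params": {"model": "openai/*"},
            "model_info": {"access_groups": ["default-models"]},
        }
    ]
)
# Assumed signature: returns True when the access group maps only to
# wildcard routes like "openai/*".
assert router._is_model_access_group_for_wildcard_route("default-models") is True
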
Krish Dholakia committed 2024-12-28 19:38:06 -08:00
commit cfb6890b9f (parent 3eb962c594)
19 changed files with 832 additions and 305 deletions


@@ -174,6 +174,7 @@ from openai import OpenAIError as OriginalError
 from litellm.llms.base_llm.audio_transcription.transformation import (
     BaseAudioTranscriptionConfig,
 )
+from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
 from litellm.llms.base_llm.chat.transformation import BaseConfig
 from litellm.llms.base_llm.completion.transformation import BaseTextCompletionConfig
 from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
@@ -1989,6 +1990,7 @@ def get_litellm_params(
     hf_model_name: Optional[str] = None,
     custom_prompt_dict: Optional[dict] = None,
     litellm_metadata: Optional[dict] = None,
+    disable_add_transform_inline_image_block: Optional[bool] = None,
 ):
     litellm_params = {
         "acompletion": acompletion,
@@ -2021,6 +2023,7 @@ def get_litellm_params(
         "hf_model_name": hf_model_name,
         "custom_prompt_dict": custom_prompt_dict,
         "litellm_metadata": litellm_metadata,
+        "disable_add_transform_inline_image_block": disable_add_transform_inline_image_block,
     }
     return litellm_params
@@ -4373,6 +4376,17 @@ def _get_model_info_helper(  # noqa: PLR0915
             model_info=_model_info, custom_llm_provider=custom_llm_provider
         ):
             _model_info = None
+        if _model_info is None and ProviderConfigManager.get_provider_model_info(
+            model=model, provider=LlmProviders(custom_llm_provider)
+        ):
+            provider_config = ProviderConfigManager.get_provider_model_info(
+                model=model, provider=LlmProviders(custom_llm_provider)
+            )
+            if provider_config is not None:
+                _model_info = cast(
+                    dict, provider_config.get_model_info(model=model)
+                )
+                key = "provider_specific_model_info"
         if _model_info is None or key is None:
             raise ValueError(
                 "This model isn't mapped yet. Add it here - https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json"
@@ -6338,6 +6352,15 @@ class ProviderConfigManager:
             return litellm.TogetherAITextCompletionConfig()
         return litellm.OpenAITextCompletionConfig()
+
+    @staticmethod
+    def get_provider_model_info(
+        model: str,
+        provider: LlmProviders,
+    ) -> Optional[BaseLLMModelInfo]:
+        if LlmProviders.FIREWORKS_AI == provider:
+            return litellm.FireworksAIConfig()
+        return None
 
 
 def get_end_user_id_for_cost_tracking(
     litellm_params: dict,
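
Finally, a hedged usage sketch of the new hook; the import paths are assumptions based on the diff above landing in litellm/utils.py:

# Resolve a provider-level model-info config; None is the fallback that
# _get_model_info_helper relies on for providers without one.
from litellm import LlmProviders
from litellm.utils import ProviderConfigManager

config = ProviderConfigManager.get_provider_model_info(
    model="accounts/fireworks/models/llama-v3p3-70b-instruct",
    provider=LlmProviders.FIREWORKS_AI,
)
if config is not None:
    print(config.get_model_info(model="accounts/fireworks/models/llama-v3p3-70b-instruct"))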