mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
Add Google AI Studio /v1/files
upload API support (#9645)
* test: fix import for test
* fix: fix bad error string
* docs: cleanup files docs
* fix(files/main.py): cleanup error string
* style: initial commit with a provider/config pattern for files api
google ai studio files api onboarding
* fix: test
* feat(gemini/files/transformation.py): support gemini files api response transformation
* fix(gemini/files/transformation.py): return file id as gemini uri
allows id to be passed in to chat completion request, just like openai
* feat(llm_http_handler.py): support async route for files api on llm_http_handler
* fix: fix linting errors
* fix: fix model info check
* fix: fix ruff errors
* fix: fix linting errors
* Revert "fix: fix linting errors"
This reverts commit 926a5a527f.
* fix: fix linting errors
* test: fix test
* test: fix tests
This commit is contained in:
parent
d1abb9b68b
commit
0519c0c507
40 changed files with 1006 additions and 245 deletions
|
@ -57,6 +57,8 @@ import litellm._service_logger # for storing API inputs, outputs, and metadata
|
|||
import litellm.litellm_core_utils
|
||||
import litellm.litellm_core_utils.audio_utils.utils
|
||||
import litellm.litellm_core_utils.json_validation_rule
|
||||
import litellm.llms
|
||||
import litellm.llms.gemini
|
||||
from litellm.caching._internal_lru_cache import lru_cache_wrapper
|
||||
from litellm.caching.caching import DualCache
|
||||
from litellm.caching.caching_handler import CachingHandlerResponse, LLMCachingHandler
|
||||
|
@ -207,6 +209,7 @@ from litellm.llms.base_llm.base_utils import (
|
|||
from litellm.llms.base_llm.chat.transformation import BaseConfig
|
||||
from litellm.llms.base_llm.completion.transformation import BaseTextCompletionConfig
|
||||
from litellm.llms.base_llm.embedding.transformation import BaseEmbeddingConfig
|
||||
from litellm.llms.base_llm.files.transformation import BaseFilesConfig
|
||||
from litellm.llms.base_llm.image_variations.transformation import (
|
||||
BaseImageVariationConfig,
|
||||
)
|
||||
|
@ -1259,6 +1262,7 @@ def client(original_function): # noqa: PLR0915
|
|||
logging_obj, kwargs = function_setup(
|
||||
original_function.__name__, rules_obj, start_time, *args, **kwargs
|
||||
)
|
||||
|
||||
kwargs["litellm_logging_obj"] = logging_obj
|
||||
## LOAD CREDENTIALS
|
||||
load_credentials_from_list(kwargs)
|
||||
|
@ -6426,6 +6430,19 @@ class ProviderConfigManager:
|
|||
return litellm.TopazImageVariationConfig()
|
||||
return None
|
||||
|
||||
@staticmethod
def get_provider_files_config(
    model: str,
    provider: LlmProviders,
) -> Optional[BaseFilesConfig]:
    """Look up the files-API configuration handler for a provider.

    Args:
        model: Model name (currently unused in the dispatch; kept for a
            uniform signature with the other `get_provider_*_config` helpers).
        provider: The LLM provider to resolve a files handler for.

    Returns:
        A `BaseFilesConfig` implementation for the provider, or ``None``
        when the provider has no files-API support.
    """
    if provider != LlmProviders.GEMINI:
        return None

    # Deferred function-level import — experimental approach to reduce
    # bloat on __init__.py (matches the original author's note).
    from litellm.llms.gemini.files.transformation import (
        GoogleAIStudioFilesHandler,
    )

    return GoogleAIStudioFilesHandler()
|
||||
|
||||
|
||||
def get_end_user_id_for_cost_tracking(
|
||||
litellm_params: dict,
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue