mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-27 11:43:54 +00:00
Add Google AI Studio `/v1/files` upload API support (#9645)
* test: fix import for test
* fix: fix bad error string
* docs: cleanup files docs
* fix(files/main.py): cleanup error string
* style: initial commit with a provider/config pattern for files api
google ai studio files api onboarding
* fix: test
* feat(gemini/files/transformation.py): support gemini files api response transformation
* fix(gemini/files/transformation.py): return file id as gemini uri
allows id to be passed in to chat completion request, just like openai
* feat(llm_http_handler.py): support async route for files api on llm_http_handler
* fix: fix linting errors
* fix: fix model info check
* fix: fix ruff errors
* fix: fix linting errors
* Revert "fix: fix linting errors"
This reverts commit 926a5a527f.
* fix: fix linting errors
* test: fix test
* test: fix tests
This commit is contained in:
parent
d1abb9b68b
commit
0519c0c507
40 changed files with 1006 additions and 245 deletions
|
@ -1,17 +1,41 @@
|
|||
from typing import List, Optional
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import httpx
|
||||
|
||||
import litellm
|
||||
from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
|
||||
from litellm.llms.base_llm.chat.transformation import BaseLLMException
|
||||
from litellm.secret_managers.main import get_secret_str
|
||||
from litellm.types.llms.openai import AllMessageValues
|
||||
|
||||
|
||||
class GeminiError(BaseLLMException):
    """Provider-specific exception for Gemini / Google AI Studio API errors.

    Raised by ``GeminiModelInfo.get_error_class`` so callers can catch
    Gemini failures distinctly from other providers' ``BaseLLMException``s.
    """

    pass
|
||||
|
||||
|
||||
class GeminiModelInfo(BaseLLMModelInfo):
|
||||
def validate_environment(
    self,
    headers: dict,
    model: str,
    messages: List[AllMessageValues],
    optional_params: dict,
    api_key: Optional[str] = None,
    api_base: Optional[str] = None,
) -> dict:
    """Return request headers unchanged.

    Google AI Studio authenticates via the ``key`` query parameter rather
    than an Authorization header, so no header mutation is needed here;
    the api_key is appended to the URL by the request-building code.
    """
    return headers
|
||||
|
||||
@property
def api_version(self) -> str:
    # Default Google AI Studio API version segment; composed into endpoint
    # paths (e.g. f"/{self.api_version}/models") rather than baked into
    # the api_base.
    return "v1beta"
|
||||
|
||||
@staticmethod
def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
    """Resolve the Gemini API base URL.

    Precedence: explicit ``api_base`` argument, then the
    ``GEMINI_API_BASE`` secret/environment variable, then the default
    Google AI Studio host.

    The returned base deliberately carries no version segment: callers
    compose the path with ``self.api_version`` (e.g.
    ``f"{api_base}/{self.api_version}/models"``), so including
    ``/v1beta`` here would duplicate it.
    """
    # NOTE: a previous revision had a second, unreachable `or ".../v1beta"`
    # branch after a truthy string literal — `or` short-circuits on the
    # first truthy operand, so only one default can ever apply.
    return (
        api_base
        or get_secret_str("GEMINI_API_BASE")
        or "https://generativelanguage.googleapis.com"
    )
|
||||
|
||||
@staticmethod
|
||||
|
@ -27,13 +51,14 @@ class GeminiModelInfo(BaseLLMModelInfo):
|
|||
) -> List[str]:
|
||||
api_base = GeminiModelInfo.get_api_base(api_base)
|
||||
api_key = GeminiModelInfo.get_api_key(api_key)
|
||||
endpoint = f"/{self.api_version}/models"
|
||||
if api_base is None or api_key is None:
|
||||
raise ValueError(
|
||||
"GEMINI_API_BASE or GEMINI_API_KEY is not set. Please set the environment variable, to query Gemini's `/models` endpoint."
|
||||
)
|
||||
|
||||
response = litellm.module_level_client.get(
|
||||
url=f"{api_base}/models?key={api_key}",
|
||||
url=f"{api_base}{endpoint}?key={api_key}",
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
|
@ -49,3 +74,10 @@ class GeminiModelInfo(BaseLLMModelInfo):
|
|||
litellm_model_name = "gemini/" + stripped_model_name
|
||||
litellm_model_names.append(litellm_model_name)
|
||||
return litellm_model_names
|
||||
|
||||
def get_error_class(
    self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:
    """Map a failed Gemini HTTP response onto the provider exception type.

    Args:
        error_message: Body/description of the upstream error.
        status_code: HTTP status returned by the Gemini API.
        headers: Response headers (plain dict or httpx.Headers).

    Returns:
        A ``GeminiError`` carrying the status, message, and headers.
    """
    return GeminiError(
        status_code=status_code, message=error_message, headers=headers
    )
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue