Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
Add /vllm/* and /mistral/* passthrough endpoints (adds support for Mistral OCR via passthrough)
* feat(llm_passthrough_endpoints.py): support mistral passthrough. Closes https://github.com/BerriAI/litellm/issues/9051
* feat(llm_passthrough_endpoints.py): initial commit for adding vllm passthrough route
* feat(vllm/common_utils.py): add new vllm model info route; makes it possible to use the vllm passthrough route via a factory function
* fix(llm_passthrough_endpoints.py): add all methods to vllm passthrough route
* fix: fix linting error
* fix: fix linting error
* fix: fix ruff check
* fix(proxy/_types.py): add new passthrough routes
* docs(config_settings.md): add mistral env vars to docs
Parent: 8faf56922c
Commit: 9b0f871129
12 changed files with 450 additions and 176 deletions
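To illustrate what the new routes enable, here is a minimal usage sketch against a running LiteLLM proxy. The proxy address, the exact forwarded paths, and the OCR payload shape are assumptions based on the `{LITELLM_API_BASE}/vllm/{endpoint}` pattern referenced in this commit and Mistral's public API docs, not code from this diff:

# Minimal sketch, assuming a LiteLLM proxy at http://localhost:4000 with
# VLLM_API_BASE and MISTRAL_API_KEY configured. The proxy forwards
# /vllm/{endpoint} and /mistral/{endpoint} to the respective upstream servers.
import httpx

PROXY = "http://localhost:4000"  # assumed proxy address

# vLLM passthrough: forwarded to {VLLM_API_BASE}/v1/models
models = httpx.get(f"{PROXY}/vllm/v1/models")
models.raise_for_status()
print(models.json())

# Mistral passthrough, e.g. the OCR endpoint (payload shape per Mistral's API docs)
ocr = httpx.post(
    f"{PROXY}/mistral/v1/ocr",
    json={
        "model": "mistral-ocr-latest",
        "document": {
            "type": "document_url",
            "document_url": "https://example.com/doc.pdf",  # hypothetical document
        },
    },
)
ocr.raise_for_status()
print(ocr.json())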
litellm/llms/vllm/common_utils.py (new file, +75 lines)
@@ -0,0 +1,75 @@
from typing import List, Optional, Union

import httpx

import litellm
from litellm.llms.base_llm.base_utils import BaseLLMModelInfo
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import AllMessageValues
from litellm.utils import _add_path_to_api_base


class VLLMError(BaseLLMException):
    pass


class VLLMModelInfo(BaseLLMModelInfo):
    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        """vLLM passthrough requires no extra auth headers; return headers unchanged."""
        return headers

    @staticmethod
    def get_api_base(api_base: Optional[str] = None) -> Optional[str]:
        api_base = api_base or get_secret_str("VLLM_API_BASE")
        if api_base is None:
            raise ValueError(
                "VLLM_API_BASE is not set. Please set the environment variable, to use VLLM's pass-through - `{LITELLM_API_BASE}/vllm/{endpoint}`."
            )
        return api_base

    @staticmethod
    def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
        # vLLM does not require an API key for the passthrough route.
        return None

    @staticmethod
    def get_base_model(model: str) -> Optional[str]:
        return model

    def get_models(
        self, api_key: Optional[str] = None, api_base: Optional[str] = None
    ) -> List[str]:
        # get_api_base raises if VLLM_API_BASE is unset; the api_key is
        # intentionally not checked here, since get_api_key always returns None.
        api_base = VLLMModelInfo.get_api_base(api_base)
        endpoint = "/v1/models"
        if api_base is None:
            raise ValueError(
                "VLLM_API_BASE is not set. Please set the environment variable, to query vLLM's `/v1/models` endpoint."
            )

        url = _add_path_to_api_base(api_base, endpoint)
        response = litellm.module_level_client.get(
            url=url,
        )

        response.raise_for_status()

        models = response.json()["data"]

        return [model["id"] for model in models]

    def get_error_class(
        self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
    ) -> BaseLLMException:
        return VLLMError(
            status_code=status_code, message=error_message, headers=headers
        )
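For reference, a minimal sketch of how this helper might be exercised, assuming a vLLM server is reachable and VLLM_API_BASE is exported; the server address is an assumption, not part of this commit:

# Hypothetical usage sketch, assuming a vLLM server is reachable and
# VLLM_API_BASE is exported (e.g. export VLLM_API_BASE=http://localhost:8000).
from litellm.llms.vllm.common_utils import VLLMModelInfo

info = VLLMModelInfo()
# Queries {VLLM_API_BASE}/v1/models and returns the model ids served by vLLM.
print(info.get_models())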