litellm-mirror/litellm/llms/watsonx/chat/handler.py

from typing import Callable, Optional, Union

import httpx

from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.types.utils import CustomStreamingDecoder, ModelResponse

from ...openai_like.chat.handler import OpenAILikeChatHandler
from ..common_utils import _get_api_params
from .transformation import IBMWatsonXChatConfig

watsonx_chat_transformation = IBMWatsonXChatConfig()


class WatsonXChatHandler(OpenAILikeChatHandler):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def completion(
        self,
        *,
        model: str,
        messages: list,
        api_base: str,
        custom_llm_provider: str,
        custom_prompt_dict: dict,
        model_response: ModelResponse,
        print_verbose: Callable,
        encoding,
        api_key: Optional[str],
        logging_obj,
        optional_params: dict,
        acompletion=None,
        litellm_params: dict = {},
        headers: Optional[dict] = None,
        logger_fn=None,
        timeout: Optional[Union[float, httpx.Timeout]] = None,
        client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
        custom_endpoint: Optional[bool] = None,
        streaming_decoder: Optional[CustomStreamingDecoder] = None,
        fake_stream: bool = False,
    ):
        api_params = _get_api_params(params=optional_params)

        ## UPDATE HEADERS
        headers = watsonx_chat_transformation.validate_environment(
            headers=headers or {},
            model=model,
            messages=messages,
            optional_params=optional_params,
            api_key=api_key,
        )

        ## UPDATE PAYLOAD (optional params)
        watsonx_auth_payload = watsonx_chat_transformation._prepare_payload(
            model=model,
            api_params=api_params,
        )
        optional_params.update(watsonx_auth_payload)

        ## GET API URL
        api_base = watsonx_chat_transformation.get_complete_url(
            api_base=api_base,
            api_key=api_key,
            model=model,
            optional_params=optional_params,
            litellm_params=litellm_params,
            stream=optional_params.get("stream", False),
        )

        return super().completion(
            model=model,
            messages=messages,
            api_base=api_base,
            custom_llm_provider=custom_llm_provider,
            custom_prompt_dict=custom_prompt_dict,
            model_response=model_response,
            print_verbose=print_verbose,
            encoding=encoding,
            api_key=api_key,
            logging_obj=logging_obj,
            optional_params=optional_params,
            acompletion=acompletion,
            litellm_params=litellm_params,
            logger_fn=logger_fn,
            headers=headers,
            timeout=timeout,
            client=client,
            custom_endpoint=True,
            streaming_decoder=streaming_decoder,
        )
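
The handler performs only watsonx-specific setup (auth headers, auth payload, complete URL) and then delegates to OpenAILikeChatHandler.completion, forcing custom_endpoint=True because get_complete_url already returns a fully-formed endpoint. A minimal usage sketch under assumptions follows: callers normally reach this handler through litellm.completion with a "watsonx/" model prefix rather than constructing WatsonXChatHandler directly; the model ID and environment variable values below are illustrative, not taken from this file.

    # Minimal sketch: litellm routes "watsonx/..." models to this handler
    # internally. Credentials shown via env vars per litellm's watsonx docs;
    # values are placeholders.
    import os

    import litellm

    os.environ["WATSONX_URL"] = "https://us-south.ml.cloud.ibm.com"  # placeholder region URL
    os.environ["WATSONX_APIKEY"] = "..."        # placeholder IBM Cloud API key
    os.environ["WATSONX_PROJECT_ID"] = "..."    # placeholder project id

    response = litellm.completion(
        model="watsonx/ibm/granite-13b-chat-v2",  # illustrative watsonx model
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(response.choices[0].message.content)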