chore: more mypy checks (ollama, vllm, ...) (#1777)

# What does this PR do?

- **chore: mypy for strong_typing**
- **chore: mypy for remote::vllm**
- **chore: mypy for remote::ollama**
- **chore: mypy for providers.datatype**

---------

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>

```diff
@@ -137,7 +137,10 @@ def get_sampling_strategy_options(params: SamplingParams) -> dict:
     return options
 
 
-def get_sampling_options(params: SamplingParams) -> dict:
+def get_sampling_options(params: SamplingParams | None) -> dict:
+    if not params:
+        return {}
+
     options = {}
     if params:
         options.update(get_sampling_strategy_options(params))
```
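
The early `return {}` is what satisfies mypy here: it narrows `params` from `SamplingParams | None` down to `SamplingParams` for the rest of the function body. A minimal, self-contained sketch of the same narrowing pattern (the `SamplingParams` dataclass below is a stand-in for illustration, not the real llama-stack type):

```python
from dataclasses import dataclass


@dataclass
class SamplingParams:
    """Stand-in for the real SamplingParams type (illustration only)."""

    max_tokens: int = 0


def get_sampling_options(params: SamplingParams | None) -> dict:
    # The early return narrows `params` to `SamplingParams`, so mypy
    # accepts the attribute access below without Optional complaints.
    if not params:
        return {}

    options: dict = {}
    if params.max_tokens:
        options["max_tokens"] = params.max_tokens
    return options


print(get_sampling_options(None))  # {}
print(get_sampling_options(SamplingParams(max_tokens=8)))  # {'max_tokens': 8}
```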
```diff
@@ -297,7 +300,7 @@ def process_chat_completion_response(
 
 async def process_completion_stream_response(
     stream: AsyncGenerator[OpenAICompatCompletionResponse, None],
-) -> AsyncGenerator:
+) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
     stop_reason = None
 
     async for chunk in stream:
```
```diff
@@ -334,7 +337,7 @@ async def process_completion_stream_response(
 async def process_chat_completion_stream_response(
     stream: AsyncGenerator[OpenAICompatCompletionResponse, None],
     request: ChatCompletionRequest,
-) -> AsyncGenerator:
+) -> AsyncGenerator[ChatCompletionResponseStreamChunk, None]:
     yield ChatCompletionResponseStreamChunk(
         event=ChatCompletionResponseEvent(
             event_type=ChatCompletionResponseEventType.start,
```
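
The two generator hunks apply the same idea: spelling out the yield type in `AsyncGenerator[..., None]` lets mypy check what consumers do with each chunk, whereas a bare `AsyncGenerator` is treated as yielding `Any`. A minimal sketch of the effect (`Chunk` is a hypothetical stand-in for the real stream-chunk types):

```python
import asyncio
from collections.abc import AsyncGenerator
from dataclasses import dataclass


@dataclass
class Chunk:
    """Stand-in for the real stream-chunk types (illustration only)."""

    text: str


async def stream_chunks() -> AsyncGenerator[Chunk, None]:
    # With the yield type declared, mypy knows each item is a Chunk.
    for text in ("hello", "world"):
        yield Chunk(text=text)


async def consume() -> None:
    async for chunk in stream_chunks():
        # mypy accepts `.text` here and would flag a typo like `.txt`;
        # with a bare `AsyncGenerator` annotation the item type is Any
        # and neither would be caught.
        print(chunk.text)


asyncio.run(consume())
```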