fix pre-commit issues

Sumit Jaiswal 2025-06-01 16:00:18 +05:30
parent ae85dd6182
commit afa9db5a6b
No known key found for this signature in database
GPG key ID: A4604B39D64D6AEC
2 changed files with 4 additions and 10 deletions


@@ -9,7 +9,6 @@ from collections.abc import AsyncGenerator, AsyncIterator
 from typing import Any
 
 import httpx
-import requests
 from openai import AsyncOpenAI
 from openai.types.chat.chat_completion_chunk import (
     ChatCompletionChunk as OpenAIChatCompletionChunk,
@@ -314,14 +313,9 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         try:
             client = self._create_client() if self.client is None else self.client
             client.models.list()  # Ensure the client is initialized
-            return HealthResponse(
-                status=HealthStatus.OK
-            )
+            return HealthResponse(status=HealthStatus.OK)
         except Exception as ex:
-            return HealthResponse(
-                status=HealthStatus.ERROR,
-                message=f"Health check failed: {str(ex)}"
-            )
+            return HealthResponse(status=HealthStatus.ERROR, message=f"Health check failed: {str(ex)}")
 
     async def _get_model(self, model_id: str) -> Model:
         if not self.model_store:
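Both rewritten return statements collapse a multi-line HealthResponse(...) call into a single line; behavior is unchanged. Below is a minimal caller-side sketch (not part of this commit) of how the health check might be consumed. The report_health helper, the import path for HealthStatus, the assumption that health() is an async method, and the dict-style field access are all illustrative assumptions, not taken from the diff.

from llama_stack.providers.datatypes import HealthStatus  # assumed import path


async def report_health(adapter) -> None:
    """Call the adapter's health() and print a one-line summary of the result."""
    response = await adapter.health()
    # The hunk above builds HealthResponse with status/message fields; dict-style
    # access is assumed here (HealthResponse may be a plain dict alias).
    if response["status"] == HealthStatus.OK:
        print("vLLM adapter: healthy")
    else:
        print(f"vLLM adapter: unhealthy ({response.get('message', 'no details')})")
    # Example invocation (hypothetical): asyncio.run(report_health(adapter))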


@@ -653,7 +653,7 @@ async def test_health_status_success(vllm_inference_adapter):
     # Mock the client.models.list method to return successfully
     # Set vllm_inference_adapter.client to None to ensure _create_client is called
     vllm_inference_adapter.client = None
-    with patch.object(vllm_inference_adapter, '_create_client') as mock_create_client:
+    with patch.object(vllm_inference_adapter, "_create_client") as mock_create_client:
         # Create mock client and models
         mock_client = MagicMock()
         mock_models = MagicMock()
@@ -678,7 +678,7 @@ async def test_health_status_failure(vllm_inference_adapter):
     """
     vllm_inference_adapter.client = None
     # Mock the client.models.list method to raise an exception
-    with patch.object(vllm_inference_adapter, '_create_client') as mock_create_client:
+    with patch.object(vllm_inference_adapter, "_create_client") as mock_create_client:
         # Create mock client and models
         mock_client = MagicMock()
         mock_models = MagicMock()
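The two test hunks only change the quoting of the patch.object target string to satisfy the formatter. For readers unfamiliar with the pattern, here is a self-contained sketch of the same patch.object/MagicMock technique on a hypothetical Adapter class; Adapter, check(), and the test name are illustrative stand-ins, not part of the llama-stack test suite, so the sketch runs without the project's fixtures.

from unittest.mock import MagicMock, patch


class Adapter:
    """Hypothetical stand-in for the vLLM adapter: it lazily creates a client."""

    def __init__(self) -> None:
        self.client = None

    def _create_client(self):
        raise RuntimeError("should be patched out in tests")

    def check(self) -> str:
        # Mirrors the adapter's health-check shape: create a client on demand,
        # probe it, and report OK or ERROR.
        client = self._create_client() if self.client is None else self.client
        try:
            client.models.list()
            return "OK"
        except Exception:
            return "ERROR"


def test_check_reports_error_when_models_list_raises() -> None:
    adapter = Adapter()
    with patch.object(adapter, "_create_client") as mock_create_client:
        mock_client = MagicMock()
        # Simulate an unreachable server: models.list() raises when called.
        mock_client.models.list.side_effect = ConnectionError("Connection refused")
        mock_create_client.return_value = mock_client

        assert adapter.check() == "ERROR"
        mock_create_client.assert_called_once()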