chore: indicate to mypy that InferenceProvider.rerank is concrete (#3238)

Matthew Farrellee, 2025-08-22 14:02:13 -05:00, committed by GitHub
parent da73f1a180
commit 2ee898cc4c
6 changed files with 1 addition and 62 deletions


@@ -39,15 +39,12 @@ from llama_stack.apis.inference import (
     Message,
     ModelStore,
     OpenAIChatCompletion,
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
     OpenAICompletion,
     OpenAIEmbeddingData,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
-    RerankResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -736,12 +733,3 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for vLLM")
-
-    async def rerank(
-        self,
-        model: str,
-        query: str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
-        items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
-        max_num_results: int | None = None,
-    ) -> RerankResponse:
-        raise NotImplementedError("Reranking is not supported for vLLM")
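
For context, a minimal sketch of the pattern behind this change, assuming the base class now ships a default body for rerank. The real InferenceProvider lives in llama_stack.apis.inference and has a richer signature; the simplified types below are illustrative only.

# Sketch, not the actual llama_stack code: a concrete default on the
# base class replaces per-adapter NotImplementedError stubs.
class InferenceProvider:
    async def rerank(
        self,
        model: str,
        query: str,
        items: list[str],
        max_num_results: int | None = None,
    ):
        # Concrete default: inherited by every provider that does not
        # support reranking, instead of each declaring its own stub.
        raise NotImplementedError(f"Reranking is not supported by {type(self).__name__}")


class VLLMInferenceAdapter(InferenceProvider):
    # No rerank override needed: mypy treats the inherited method as
    # concrete, and callers still get NotImplementedError at runtime.
    pass

With the default in place, the vLLM adapter's stub above, and the imports it alone used (OpenAIChatCompletionContentPartImageParam, OpenAIChatCompletionContentPartTextParam, RerankResponse), can be deleted without mypy flagging rerank as missing.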