Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 20:14:13 +00:00)
chore: indicate to mypy that InferenceProvider.rerank is concrete (#3238)
Commit 2ee898cc4c (parent da73f1a180)
6 changed files with 1 addition and 62 deletions
@@ -1170,6 +1170,7 @@ class InferenceProvider(Protocol):
         :returns: RerankResponse with indices sorted by relevance score (descending).
         """
         raise NotImplementedError("Reranking is not implemented")
+        return  # this is so mypy's safe-super rule will consider the method concrete

     @webmethod(route="/openai/v1/completions", method="POST")
     async def openai_completion(
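The added `return` is the entire change of substance. mypy classifies a method body that is only a docstring plus `pass`, `...`, or `raise NotImplementedError` as "trivial", and a protocol method with a trivial body counts as abstract; any `super()` call to it is then flagged under the `safe-super` error code. An unreachable `return` after the `raise` makes the body non-trivial, so mypy now treats `rerank` as concrete. A minimal sketch of the rule, using hypothetical names rather than the repo's own classes:

```python
# Sketch of mypy's safe-super / trivial-body rule (hypothetical names).
from typing import Protocol


class Ranker(Protocol):
    def rank(self) -> list[int]:
        """Docstring + raise is a "trivial body", so mypy treats rank as abstract."""
        raise NotImplementedError("rank is not implemented")


class ChainedRanker(Ranker):
    def rank(self) -> list[int]:
        # mypy: Call to abstract method "rank" of "Ranker" with trivial body
        # via super() is unsafe  [safe-super]
        return super().rank()


class ConcreteRanker(Protocol):
    def rank(self) -> list[int]:
        """Identical at runtime, but the extra return makes the body non-trivial."""
        raise NotImplementedError("rank is not implemented")
        return  # unreachable; present only so mypy sees a concrete method
```

A `super().rank()` call against `ConcreteRanker` type-checks cleanly, which is exactly what the one-line addition buys `InferenceProvider.rerank`.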
@@ -33,9 +33,6 @@ from llama_stack.apis.inference import (
     InterleavedContent,
     LogProbConfig,
     Message,
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
-    RerankResponse,
     ResponseFormat,
     SamplingParams,
     StopReason,
@@ -445,15 +442,6 @@ class MetaReferenceInferenceImpl(
         results = await self._nonstream_chat_completion(request_batch)
         return BatchChatCompletionResponse(batch=results)

-    async def rerank(
-        self,
-        model: str,
-        query: str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
-        items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
-        max_num_results: int | None = None,
-    ) -> RerankResponse:
-        raise NotImplementedError("Reranking is not supported for Meta Reference")
-
     async def _nonstream_chat_completion(
         self, request_batch: list[ChatCompletionRequest]
     ) -> list[ChatCompletionResponse]:
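Every remaining hunk is the same mechanical cleanup enabled by that change: with `InferenceProvider.rerank` concrete, the per-provider `rerank` stubs (and the `OpenAIChatCompletionContentPartImageParam`, `OpenAIChatCompletionContentPartTextParam`, and `RerankResponse` imports they required) can simply be deleted, because explicit subclasses now inherit the default body. A hedged sketch of the before/after, again with hypothetical names:

```python
# Why the per-provider stubs become redundant (hypothetical names).
from typing import Protocol


class InferenceProvider(Protocol):
    async def rerank(self, model: str, query: str) -> list[int]:
        raise NotImplementedError("Reranking is not implemented")
        return  # concrete for mypy, so subclasses may simply inherit it


class SomeAdapter(InferenceProvider):
    # Before this commit, a stub like
    #     raise NotImplementedError("Reranking is not supported for <provider>")
    # sat here; with rerank concrete, the inherited default raises
    # NotImplementedError at runtime with no extra code in each adapter.
    pass
```

The only observable difference is the error message: callers of a provider without reranking now get the generic "Reranking is not implemented" instead of a provider-specific string.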
@@ -12,9 +12,6 @@ from llama_stack.apis.inference import (
     InterleavedContent,
     LogProbConfig,
     Message,
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
-    RerankResponse,
     ResponseFormat,
     SamplingParams,
     ToolChoice,
@@ -125,12 +122,3 @@ class SentenceTransformersInferenceImpl(
         logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for Sentence Transformers")
-
-    async def rerank(
-        self,
-        model: str,
-        query: str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
-        items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
-        max_num_results: int | None = None,
-    ) -> RerankResponse:
-        raise NotImplementedError("Reranking is not supported for Sentence Transformers")
@@ -3,11 +3,6 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.inference import (
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
-    RerankResponse,
-)
 from llama_stack.log import get_logger
 from llama_stack.providers.remote.inference.llama_openai_compat.config import LlamaCompatConfig
 from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOpenAIMixin
@@ -59,12 +54,3 @@ class LlamaCompatInferenceAdapter(OpenAIMixin, LiteLLMOpenAIMixin):

     async def shutdown(self):
         await super().shutdown()
-
-    async def rerank(
-        self,
-        model: str,
-        query: str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
-        items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
-        max_num_results: int | None = None,
-    ) -> RerankResponse:
-        raise NotImplementedError("Reranking is not supported for Llama OpenAI Compat")
@@ -37,14 +37,11 @@ from llama_stack.apis.inference import (
     Message,
     OpenAIChatCompletion,
     OpenAIChatCompletionChunk,
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
     OpenAICompletion,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
-    RerankResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -644,15 +641,6 @@ class OllamaInferenceAdapter(
     ):
         raise NotImplementedError("Batch chat completion is not supported for Ollama")

-    async def rerank(
-        self,
-        model: str,
-        query: str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
-        items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
-        max_num_results: int | None = None,
-    ) -> RerankResponse:
-        raise NotImplementedError("Reranking is not supported for Ollama")
-

 async def convert_message_to_openai_dict_for_ollama(message: Message) -> list[dict]:
     async def _convert_content(content) -> dict:
@@ -39,15 +39,12 @@ from llama_stack.apis.inference import (
     Message,
     ModelStore,
     OpenAIChatCompletion,
-    OpenAIChatCompletionContentPartImageParam,
-    OpenAIChatCompletionContentPartTextParam,
     OpenAICompletion,
     OpenAIEmbeddingData,
     OpenAIEmbeddingsResponse,
     OpenAIEmbeddingUsage,
     OpenAIMessageParam,
     OpenAIResponseFormatParam,
-    RerankResponse,
     ResponseFormat,
     SamplingParams,
     TextTruncation,
@@ -736,12 +733,3 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for vLLM")
-
-    async def rerank(
-        self,
-        model: str,
-        query: str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam,
-        items: list[str | OpenAIChatCompletionContentPartTextParam | OpenAIChatCompletionContentPartImageParam],
-        max_num_results: int | None = None,
-    ) -> RerankResponse:
-        raise NotImplementedError("Reranking is not supported for vLLM")
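Runtime behavior is otherwise untouched: the new `return` is dead code, and providers that never supported reranking still raise `NotImplementedError` when called. A quick self-contained check, using the same hypothetical names as the sketches above:

```python
# Runtime check that the unreachable return changes nothing (hypothetical names).
import asyncio
from typing import Protocol


class InferenceProvider(Protocol):
    async def rerank(self, model: str, query: str) -> list[int]:
        raise NotImplementedError("Reranking is not implemented")
        return  # unreachable: only mypy's trivial-body check cares


class StubAdapter(InferenceProvider):
    pass  # inherits the default rerank


async def main() -> None:
    try:
        await StubAdapter().rerank("some-model", "some query")
    except NotImplementedError as err:
        print(f"still raises as before: {err}")


asyncio.run(main())
```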