mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-17 15:49:49 +00:00
Fix docker failing to start container
This commit is contained in:
parent
52106d95d3
commit
83ede71e76
4 changed files with 22 additions and 11 deletions
|
|
@@ -116,7 +116,7 @@ def available_providers() -> list[ProviderSpec]:
|
|||
adapter=AdapterSpec(
|
||||
adapter_type="fireworks",
|
||||
pip_packages=[
|
||||
"fireworks-ai",
|
||||
"fireworks-ai==0.17.16",
|
||||
],
|
||||
module="llama_stack.providers.remote.inference.fireworks",
|
||||
config_class="llama_stack.providers.remote.inference.fireworks.FireworksImplConfig",
|
||||
|
|
|
|||
|
|
@@ -6,7 +6,9 @@
|
|||
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.apis.inference import ChatCompletionRequest
|
||||
from openai.types.chat import ChatCompletionContentPartImageParam, ChatCompletionContentPartTextParam
|
||||
|
||||
from llama_stack.apis.inference import ChatCompletionRequest, RerankResponse
|
||||
from llama_stack.providers.utils.inference.litellm_openai_mixin import (
|
||||
LiteLLMOpenAIMixin,
|
||||
)
|
||||
|
|
@@ -50,3 +52,12 @@ class VertexAIInferenceAdapter(LiteLLMOpenAIMixin):
|
|||
params.pop("api_key", None)
|
||||
|
||||
return params
|
||||
|
||||
async def rerank(
    self,
    model: str,
    query: str | ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam,
    items: list[str | ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam],
    max_num_results: int | None = None,
) -> RerankResponse:
    """Rerank *items* against *query* — unsupported on this provider.

    Vertex AI exposes no reranking endpoint through this adapter, so every
    call fails fast regardless of arguments.

    :param model: identifier of the rerank model (unused).
    :param query: search query as text, text-part, or image-part (unused).
    :param items: candidate items to rank (unused).
    :param max_num_results: cap on returned results (unused).
    :raises NotImplementedError: always.
    """
    raise NotImplementedError("Reranking is not supported for Vertex AI")
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue