mirror of https://github.com/meta-llama/llama-stack.git
synced 2026-01-03 16:22:16 +00:00
Add unsupported OpenAI mixin to all remaining inference providers
parent 00c4493bda
commit 15d37fde19
7 changed files with 56 additions and 7 deletions
@@ -40,8 +40,10 @@ from llama_stack.providers.utils.inference.model_registry import (
     build_hf_repo_model_entry,
 )
 from llama_stack.providers.utils.inference.openai_compat import (
+    OpenAIChatCompletionUnsupportedMixin,
     OpenAICompatCompletionChoice,
     OpenAICompatCompletionResponse,
+    OpenAICompletionUnsupportedMixin,
     get_sampling_options,
     process_chat_completion_response,
     process_chat_completion_stream_response,
@@ -69,7 +71,12 @@ def build_hf_repo_model_entries():
     ]


-class _HfAdapter(Inference, ModelsProtocolPrivate):
+class _HfAdapter(
+    Inference,
+    OpenAIChatCompletionUnsupportedMixin,
+    OpenAICompletionUnsupportedMixin,
+    ModelsProtocolPrivate,
+):
     client: AsyncInferenceClient
     max_tokens: int
     model_id: str
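
For readers unfamiliar with these mixins, the sketch below shows the pattern this commit applies. It is an assumption for illustration only: the real classes live in llama_stack.providers.utils.inference.openai_compat, and their actual method names and signatures may differ. The idea is that each mixin implements the OpenAI-compatible entry point by raising NotImplementedError, so an adapter that mixes them in (like _HfAdapter above) still satisfies the Inference protocol while clearly signaling that the OpenAI endpoints are not supported.

# Hypothetical sketch of the "unsupported" mixins; names and signatures
# are illustrative assumptions, not the actual definitions in
# llama_stack.providers.utils.inference.openai_compat.

class OpenAICompletionUnsupportedMixin:
    async def openai_completion(self, *args, **kwargs):
        # Signal clearly that the OpenAI /completions surface is unavailable.
        raise NotImplementedError(
            f"OpenAI completions are not supported by {type(self).__name__}"
        )


class OpenAIChatCompletionUnsupportedMixin:
    async def openai_chat_completion(self, *args, **kwargs):
        # Same idea for the OpenAI /chat/completions surface.
        raise NotImplementedError(
            f"OpenAI chat completions are not supported by {type(self).__name__}"
        )

Under this pattern, an adapter that later gains native OpenAI support would simply drop the relevant mixin and implement the method itself.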