mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-08 19:10:56 +00:00)
rename augment_messages
This commit is contained in:
parent 336cf7a674
commit 640c5c54f7
10 changed files with 34 additions and 33 deletions
@@ -19,10 +19,6 @@ from vllm.sampling_params import SamplingParams

 from llama_stack.apis.inference import *  # noqa: F403
-from llama_stack.providers.utils.inference.augment_messages import (
-    chat_completion_request_to_prompt,
-)
-
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack.providers.utils.inference.openai_compat import (
     OpenAICompatCompletionChoice,
@@ -30,6 +26,9 @@ from llama_stack.providers.utils.inference.openai_compat import (
     process_chat_completion_response,
     process_chat_completion_stream_response,
 )
+from llama_stack.providers.utils.inference.prompt_adapter import (
+    chat_completion_request_to_prompt,
+)

 from .config import VLLMConfig
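For call sites such as the vLLM provider shown above, the only change is the import path: chat_completion_request_to_prompt now lives in llama_stack.providers.utils.inference.prompt_adapter instead of the old augment_messages module. The sketch below illustrates a call site after the rename; it is not taken from this commit, and the single-argument signature, the model identifier, and the ChatCompletionRequest/UserMessage construction are illustrative assumptions (the real helper may also require a model or formatter argument).

# Illustrative sketch only, not code from this commit. Assumes the helper
# accepts a ChatCompletionRequest and returns a flat prompt string.
from llama_stack.apis.inference import ChatCompletionRequest, UserMessage
from llama_stack.providers.utils.inference.prompt_adapter import (
    chat_completion_request_to_prompt,  # previously imported from augment_messages
)

def build_prompt() -> str:
    # Structured chat request as a provider would receive it from the Inference API.
    request = ChatCompletionRequest(
        model="Llama3.1-8B-Instruct",  # hypothetical model identifier
        messages=[UserMessage(content="Hello, world!")],
    )
    # Convert the structured request into the single prompt string that a
    # completion-style backend (e.g. vLLM) consumes.
    return chat_completion_request_to_prompt(request)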