Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-12 04:50:39 +00:00
Remove commented code
commit c08ca19d79
parent 5d54c2ee70
1 changed file with 0 additions and 2 deletions
@@ -172,8 +172,6 @@ def _convert_sampling_params(
     # vLLM allows top-p and top-k at the same time.
     vllm_sampling_params = vllm.SamplingParams.from_optional(
         max_tokens=(None if sampling_params.max_tokens == 0 else sampling_params.max_tokens),
-        # Assume that vLLM's default stop token will work
-        # stop_token_ids=[tokenizer.eos_token_id],
         temperature=vllm_temperature,
         top_p=vllm_top_p,
         top_k=vllm_top_k,
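For context, a minimal self-contained sketch of the call after this change. The names vllm_temperature, vllm_top_p, vllm_top_k, and SamplingParams.from_optional come from the hunk above; the concrete values and the bare max_tokens variable are hypothetical stand-ins for what _convert_sampling_params computes earlier in the file. Per the deleted comment, the code relies on vLLM's default stop handling rather than passing stop_token_ids explicitly.

    import vllm

    # Hypothetical stand-ins for values computed earlier in
    # _convert_sampling_params(); the real code derives them from the
    # incoming sampling_params object.
    vllm_temperature = 0.7
    vllm_top_p = 0.9
    vllm_top_k = 40
    max_tokens = 0  # the caller uses 0 to mean "no explicit limit"

    # vLLM allows top-p and top-k at the same time, so both are passed
    # through. stop_token_ids is deliberately omitted: the removed
    # comment noted that vLLM's default stop token is assumed to work.
    vllm_sampling_params = vllm.SamplingParams.from_optional(
        max_tokens=(None if max_tokens == 0 else max_tokens),
        temperature=vllm_temperature,
        top_p=vllm_top_p,
        top_k=vllm_top_k,
    )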