Run the script to produce the vLLM outputs

This commit is contained in:
Ashwin Bharambe 2024-11-17 14:09:36 -08:00
parent 0218e68849
commit 9bb07ce298
10 changed files with 109 additions and 71 deletions

View file

@ -41,6 +41,7 @@ def get_distribution_template() -> DistributionTemplate:
name="remote-vllm",
distro_type="self_hosted",
description="Use (an external) vLLM server for running LLM inference",
docker_image="llamastack/distribution-remote-vllm:test-0.0.52rc3",
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=[inference_model, safety_model],