Run the script to produce vLLM outputs

This commit is contained in:
Ashwin Bharambe 2024-11-17 14:09:36 -08:00
parent 0218e68849
commit 9bb07ce298
10 changed files with 109 additions and 71 deletions

View file

@@ -1,12 +1,19 @@
version: '2'
name: remote-vllm
distribution_spec:
description: Use (an external) vLLM server for running LLM inference
docker_image: llamastack/distribution-remote-vllm:test-0.0.52rc3
providers:
inference: remote::vllm
inference:
- remote::vllm
memory:
- inline::faiss
- remote::chromadb
- remote::pgvector
safety: inline::llama-guard
agents: inline::meta-reference
telemetry: inline::meta-reference
safety:
- inline::llama-guard
agents:
- inline::meta-reference
telemetry:
- inline::meta-reference
image_type: conda

View file

@@ -41,6 +41,7 @@ def get_distribution_template() -> DistributionTemplate:
name="remote-vllm",
distro_type="self_hosted",
description="Use (an external) vLLM server for running LLM inference",
docker_image="llamastack/distribution-remote-vllm:test-0.0.52rc3",
template_path=Path(__file__).parent / "doc_template.md",
providers=providers,
default_models=[inference_model, safety_model],

View file

@@ -91,7 +91,7 @@ class RunConfigSettings(BaseModel):
apis=list(apis),
providers=provider_configs,
metadata_store=SqliteKVStoreConfig.sample_run_config(
dir=f"distributions/{name}",
__distro_dir__=f"distributions/{name}",
db_name="registry.db",
),
models=self.default_models,