Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-05 12:21:52 +00:00)
chore: update the vLLM inference impl to use OpenAIMixin for openai-compat functions
inference recordings from Qwen3-0.6B and vLLM 0.8.3 -

```
docker run --gpus all -v ~/.cache/huggingface:/root/.cache/huggingface -p 8000:8000 --ipc=host \
    vllm/vllm-openai:latest \
    --model Qwen/Qwen3-0.6B --enable-auto-tool-choice --tool-call-parser hermes
```

test with -

```
./scripts/integration-tests.sh --stack-config server:ci-tests --setup vllm --subdirs inference
```
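For context, a minimal sketch of what the change amounts to: the vLLM adapter stops hand-rolling the OpenAI-compatible inference methods and inherits them from `OpenAIMixin`, supplying only the connection hooks. The hook names (`get_api_key`, `get_base_url`), the import path, and the `api_token`/`url` config fields are assumptions about the mixin's interface at the time of this commit, not quoted from the diff; treat this as illustrative, not the actual patch.

```python
# Illustrative sketch only: an adapter that inherits openai-compat methods
# (chat completions, completions, embeddings) from OpenAIMixin instead of
# implementing them itself. Hook names are assumptions; check
# llama_stack/providers/utils/inference/openai_mixin.py for the real interface.
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin


class VLLMInferenceAdapter(OpenAIMixin):
    def __init__(self, config):
        self.config = config

    def get_api_key(self) -> str:
        # vLLM servers accept any token unless started with --api-key.
        return self.config.api_token or "fake"

    def get_base_url(self) -> str:
        # e.g. http://localhost:8000/v1, typically sourced from VLLM_URL.
        return self.config.url
```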
This commit is contained in:
parent c86e45496e
commit c2a9c65fff
33 changed files with 51813 additions and 203 deletions
```diff
@@ -78,7 +78,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
             "VLLM_URL": "http://localhost:8000/v1",
         },
         defaults={
-            "text_model": "vllm/meta-llama/Llama-3.2-1B-Instruct",
+            "text_model": "vllm/Qwen/Qwen3-0.6B",
             "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
         },
     ),
```
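To sanity-check that the server launched by the docker command above is serving the new default model before recording, a quick snippet against the OpenAI-compatible endpoint can help. This uses the standard `openai` Python client and is not part of this commit; the base URL and model name come from the setup definition above.

```python
# Hedged verification snippet (not from the diff): confirm the local vLLM
# server answers on its OpenAI-compatible API with Qwen/Qwen3-0.6B.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="fake")
resp = client.chat.completions.create(
    model="Qwen/Qwen3-0.6B",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(resp.choices[0].message.content)
```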