chore: update the vLLM inference impl to use OpenAIMixin for openai-compat functions

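For context, a minimal sketch of the shape this change implies — the adapter inherits llama-stack's OpenAIMixin and supplies connection details instead of hand-rolling the openai-compat endpoints. The hook names and config fields below are assumptions for illustration, not the exact code in this commit:

```
# Illustrative sketch only: the mixin provides openai_completion /
# openai_chat_completion / openai_embeddings over an OpenAI-compatible
# client; the adapter only supplies connection details. The `config`
# fields here are hypothetical.
from llama_stack.providers.utils.inference.openai_mixin import OpenAIMixin


class VLLMInferenceAdapter(OpenAIMixin):
    def __init__(self, config):
        self.config = config

    def get_api_key(self) -> str:
        # vLLM does not require a real key unless one is configured
        return self.config.api_token or "NO_KEY"

    def get_base_url(self) -> str:
        return self.config.url  # e.g. http://localhost:8000/v1
```
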
inference recordings generated with Qwen3-0.6B on vLLM 0.8.3 -
```
docker run --gpus all -v ~/.cache/huggingface:/root/.cache/huggingface -p 8000:8000 --ipc=host \
    vllm/vllm-openai:latest \
    --model Qwen/Qwen3-0.6B --enable-auto-tool-choice --tool-call-parser hermes
```

test with -

```
./scripts/integration-tests.sh --stack-config server:ci-tests --setup vllm --subdirs inference
```
Matthew Farrellee 2025-09-10 10:10:10 -04:00
parent c86e45496e
commit c2a9c65fff
33 changed files with 51813 additions and 203 deletions

the vllm setup's default text model was updated to match -

```
@@ -78,7 +78,7 @@ SETUP_DEFINITIONS: dict[str, Setup] = {
             "VLLM_URL": "http://localhost:8000/v1",
         },
         defaults={
-            "text_model": "vllm/meta-llama/Llama-3.2-1B-Instruct",
+            "text_model": "vllm/Qwen/Qwen3-0.6B",
             "embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
         },
     ),
```