Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-11 19:56:03 +00:00)
fix(mypy): use correct OpenAIChatCompletionChunk import in vllm
Import OpenAIChatCompletionChunk from llama_stack.apis.inference instead of aliasing it from the openai package, so that the override's signature matches the parent class signature.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
parent 864a5ff19b
commit 7e5020b61f
1 changed file with 1 addition and 3 deletions
```diff
@@ -7,13 +7,11 @@ from collections.abc import AsyncIterator
 from urllib.parse import urljoin
 
 import httpx
-from openai.types.chat.chat_completion_chunk import (
-    ChatCompletionChunk as OpenAIChatCompletionChunk,
-)
 from pydantic import ConfigDict
 
 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
     OpenAIChatCompletionRequestWithExtraBody,
     ToolChoice,
 )
```
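For context, here is a minimal sketch of the kind of mypy error this resolves, assuming the two `OpenAIChatCompletionChunk` names resolve to distinct classes (llama-stack ships its own OpenAI-compatible models). `BaseAdapter` and `stream_chat` are hypothetical names for illustration, not llama-stack APIs:

```python
# Hypothetical sketch of mypy's override check behind this fix.
# BaseAdapter and stream_chat are illustrative, not from llama-stack.
from collections.abc import AsyncIterator

from llama_stack.apis.inference import OpenAIChatCompletionChunk


class BaseAdapter:
    # The parent class annotates the stream element type with the
    # llama_stack.apis.inference name.
    async def stream_chat(self) -> AsyncIterator[OpenAIChatCompletionChunk]:
        raise NotImplementedError


class VLLMAdapter(BaseAdapter):
    # Annotating this override with openai's ChatCompletionChunk, even when
    # aliased to the same name, would be a different type as far as mypy is
    # concerned, triggering an incompatible-override error. Reusing the
    # parent's import keeps the two signatures identical.
    async def stream_chat(self) -> AsyncIterator[OpenAIChatCompletionChunk]:
        raise NotImplementedError
```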