Mirror of https://github.com/meta-llama/llama-stack.git
Synced 2025-12-11 19:56:03 +00:00
fix(mypy): use correct OpenAIChatCompletionChunk import in vllm
Import OpenAIChatCompletionChunk from llama_stack.apis.inference instead of aliasing it from the openai package, so that the import matches the parent class signature.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
parent 7e37790647
commit 0944b6a734

1 changed file with 1 addition and 3 deletions
@@ -7,13 +7,11 @@ from collections.abc import AsyncIterator
 from urllib.parse import urljoin
 
 import httpx
-from openai.types.chat.chat_completion_chunk import (
-    ChatCompletionChunk as OpenAIChatCompletionChunk,
-)
 from pydantic import ConfigDict
 
 from llama_stack.apis.inference import (
     OpenAIChatCompletion,
+    OpenAIChatCompletionChunk,
     OpenAIChatCompletionRequestWithExtraBody,
     ToolChoice,
 )
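For context, here is a minimal sketch of the mypy rule this commit addresses, using hypothetical `ChunkA`/`ChunkB` stand-in classes (not names from the actual codebase): an override whose annotation names a different type than the parent's is reported as incompatible, even when the two types describe the same data.

```python
from collections.abc import AsyncIterator


# Hypothetical stand-ins: ChunkA plays the role of the type used in the
# parent class annotation (llama_stack.apis.inference.OpenAIChatCompletionChunk);
# ChunkB plays the role of the alias previously imported from the openai package.
class ChunkA: ...


class ChunkB: ...


class Parent:
    def stream(self) -> AsyncIterator[ChunkA]: ...


class Child(Parent):
    # mypy flags this override: AsyncIterator[ChunkB] is not compatible with
    # AsyncIterator[ChunkA] declared in the supertype, because the two classes
    # are unrelated in the type system regardless of their runtime shape.
    def stream(self) -> AsyncIterator[ChunkB]: ...
```

Running mypy on this sketch reports the override as incompatible; importing the same name the parent annotation uses, as the diff above does, makes the signatures line up.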