Mirror of https://github.com/meta-llama/llama-stack.git
ruff
Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
Parent: fcf3b0a835
Commit: 58a040a805
1 changed file with 3 additions and 1 deletion
@@ -336,7 +336,9 @@ class VLLMInferenceAdapter(Inference, ModelsProtocolPrivate):
         r = await self.client.completions.create(**params)
         return process_completion_response(r)

-    async def _stream_completion(self, request: CompletionRequest) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
+    async def _stream_completion(
+        self, request: CompletionRequest
+    ) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
         assert self.client is not None
         params = await self._get_params(request)

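The change is purely cosmetic: the one-line signature of _stream_completion is wrapped across three lines, the shape a formatter such as ruff produces when a signature exceeds the configured line length. Below is a minimal, self-contained sketch of the resulting layout; the imports, stub types, class body, and yield statement are assumptions added for illustration, and only the three wrapped signature lines come from the diff.

# Minimal sketch of the reformatted signature in isolation.
# Only the wrapped signature is taken from the diff; everything else
# (imports, stub types, the method body) is a placeholder for illustration.
from typing import AsyncGenerator


class CompletionRequest:
    # Stand-in for the real llama-stack request type.
    ...


class CompletionResponseStreamChunk:
    # Stand-in for the real llama-stack stream chunk type.
    ...


class VLLMInferenceAdapter:
    async def _stream_completion(
        self, request: CompletionRequest
    ) -> AsyncGenerator[CompletionResponseStreamChunk, None]:
        # Body elided in the hunk; a yield keeps this a valid async generator.
        yield CompletionResponseStreamChunk()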