Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 02:53:30 +00:00)
add completion api support to nvidia inference provider (#533)
# What does this PR do?

Add the completion API to the NVIDIA inference provider.

## Test Plan

While running the meta/llama-3.1-8b-instruct NIM from https://build.nvidia.com/meta/llama-3_1-8b-instruct?snippet_tab=Docker:

```
➜ pytest -s -v --providers inference=nvidia llama_stack/providers/tests/inference/ --env NVIDIA_BASE_URL=http://localhost:8000 -k test_completion --inference-model Llama3.1-8B-Instruct
=============================================== test session starts ===============================================
platform linux -- Python 3.10.15, pytest-8.3.3, pluggy-1.5.0 -- /home/matt/.conda/envs/stack/bin/python
cachedir: .pytest_cache
rootdir: /home/matt/Documents/Repositories/meta-llama/llama-stack
configfile: pyproject.toml
plugins: anyio-4.6.2.post1, asyncio-0.24.0, httpx-0.34.0
asyncio: mode=strict, default_loop_scope=None
collected 20 items / 18 deselected / 2 selected

llama_stack/providers/tests/inference/test_text_inference.py::TestInference::test_completion[-nvidia] PASSED
llama_stack/providers/tests/inference/test_text_inference.py::TestInference::test_completion_structured_output[-nvidia] SKIPPED

============================= 1 passed, 1 skipped, 18 deselected, 6 warnings in 5.40s =============================
```

Note: the structured output functionality runs end to end, but its accuracy check fails, so the structured output test is skipped for now.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [x] Ran pre-commit to handle lint / formatting issues.
- [x] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [x] Wrote necessary unit or integration tests.
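Since the Test Plan runs against a local NIM, a quick way to sanity-check the endpoint the adapter talks to is to hit the NIM's OpenAI-compatible completions route directly. This is a minimal sketch, not part of the PR; it assumes the meta/llama-3.1-8b-instruct NIM from the link above is serving at http://localhost:8000 and that no real API key is needed locally (the prompt and parameters are illustrative):

```python
# Sketch: sanity-check the local NIM's OpenAI-compatible /v1/completions
# endpoint before pointing the llama-stack provider at it. Assumes the
# meta/llama-3.1-8b-instruct NIM is running at http://localhost:8000.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="not-needed-locally")
resp = client.completions.create(
    model="meta/llama-3.1-8b-instruct",
    prompt="The capital of France is",
    max_tokens=8,
    temperature=0.0,
)
print(resp.choices[0].text)
```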
parent 07c72c4256 · commit b52df5fe5b

3 changed files with 208 additions and 7 deletions
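The hunks captured below are from the NVIDIA inference adapter: the first three extend its imports, and the last two turn the `completion` stub into a working implementation.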
```diff
@@ -9,6 +9,7 @@ from typing import AsyncIterator, List, Optional, Union
 
 from llama_models.datatypes import SamplingParams
 from llama_models.llama3.api.datatypes import (
+    ImageMedia,
     InterleavedTextMedia,
     Message,
     ToolChoice,
```
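The completion request/response types join the existing chat-completion imports from the inference API: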
```diff
@@ -22,6 +23,7 @@ from llama_stack.apis.inference import (
     ChatCompletionRequest,
     ChatCompletionResponse,
     ChatCompletionResponseStreamChunk,
+    CompletionRequest,
     CompletionResponse,
     CompletionResponseStreamChunk,
     EmbeddingsResponse,
```
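Likewise, the completion converters come in alongside the existing chat-completion converters from `openai_utils`: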
```diff
@@ -37,8 +39,11 @@ from llama_stack.providers.utils.inference.model_registry import (
 from . import NVIDIAConfig
 from .openai_utils import (
     convert_chat_completion_request,
+    convert_completion_request,
     convert_openai_chat_completion_choice,
     convert_openai_chat_completion_stream,
+    convert_openai_completion_choice,
+    convert_openai_completion_stream,
 )
 from .utils import _is_nvidia_hosted, check_health
 
```
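`completion` becomes a coroutine so it can await the health check and the async OpenAI client: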
```diff
@@ -115,7 +120,7 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
             timeout=self._config.timeout,
         )
 
-    def completion(
+    async def completion(
         self,
         model_id: str,
         content: InterleavedTextMedia,
```
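Finally, the `NotImplementedError` stub is replaced by the real implementation: reject image content, verify the NIM is reachable, convert the request into OpenAI completion parameters, issue the call, and convert the result back into llama-stack types: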
```diff
@@ -124,7 +129,38 @@ class NVIDIAInferenceAdapter(Inference, ModelRegistryHelper):
         stream: Optional[bool] = False,
         logprobs: Optional[LogProbConfig] = None,
     ) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]:
-        raise NotImplementedError()
+        if isinstance(content, ImageMedia) or (
+            isinstance(content, list)
+            and any(isinstance(c, ImageMedia) for c in content)
+        ):
+            raise NotImplementedError("ImageMedia is not supported")
+
+        await check_health(self._config)  # this raises errors
+
+        request = convert_completion_request(
+            request=CompletionRequest(
+                model=self.get_provider_model_id(model_id),
+                content=content,
+                sampling_params=sampling_params,
+                response_format=response_format,
+                stream=stream,
+                logprobs=logprobs,
+            ),
+            n=1,
+        )
+
+        try:
+            response = await self._client.completions.create(**request)
+        except APIConnectionError as e:
+            raise ConnectionError(
+                f"Failed to connect to NVIDIA NIM at {self._config.url}: {e}"
+            ) from e
+
+        if stream:
+            return convert_openai_completion_stream(response)
+        else:
+            # we pass n=1 to get only one completion
+            return convert_openai_completion_choice(response.choices[0])
 
     async def embeddings(
         self,
```
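For reviewers, here is a caller's-eye view of the new method. This is a hedged sketch, not code from the PR: the adapter construction is elided, and the `response.content` / `chunk.delta` attribute names are assumed from the `llama_stack.apis.inference` datatypes of this era:

```python
from llama_stack.apis.inference import CompletionResponse

async def exercise_completion(adapter) -> None:
    # `adapter` is a configured NVIDIAInferenceAdapter; construction elided.
    # Non-streaming: a single CompletionResponse comes back.
    response = await adapter.completion(
        model_id="Llama3.1-8B-Instruct",
        content="The capital of France is",
        stream=False,
    )
    assert isinstance(response, CompletionResponse)
    print(response.content)

    # Streaming: an async iterator of CompletionResponseStreamChunk instead.
    stream = await adapter.completion(
        model_id="Llama3.1-8B-Instruct",
        content="The capital of France is",
        stream=True,
    )
    async for chunk in stream:
        print(chunk.delta, end="")
```

Note the `n=1` passed to `convert_completion_request`: the adapter always requests a single choice, which is why indexing `response.choices[0]` in the non-streaming branch is safe.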