Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-04 20:14:13 +00:00)
chore(apis): unpublish deprecated /v1/inference apis
parent 478b4ff1e6
commit 26f4f3fe14
6 changed files with 1286 additions and 3770 deletions
docs/_static/llama-stack-spec.html (vendored, 2319 lines changed)
File diff suppressed because it is too large.
docs/_static/llama-stack-spec.yaml (vendored, 1810 lines changed)
File diff suppressed because it is too large.
@@ -1026,7 +1026,6 @@ class InferenceProvider(Protocol):
     model_store: ModelStore | None = None

-    @webmethod(route="/inference/completion", method="POST")
     async def completion(
         self,
         model_id: str,
@@ -1049,7 +1048,6 @@ class InferenceProvider(Protocol):
         """
         ...

-    @webmethod(route="/inference/batch-completion", method="POST", experimental=True)
     async def batch_completion(
         self,
         model_id: str,
@@ -1070,7 +1068,6 @@ class InferenceProvider(Protocol):
         raise NotImplementedError("Batch completion is not implemented")
         return  # this is so mypy's safe-super rule will consider the method concrete

-    @webmethod(route="/inference/chat-completion", method="POST")
     async def chat_completion(
         self,
         model_id: str,
@@ -1110,7 +1107,6 @@ class InferenceProvider(Protocol):
         """
         ...

-    @webmethod(route="/inference/batch-chat-completion", method="POST", experimental=True)
     async def batch_chat_completion(
         self,
         model_id: str,
@@ -1135,7 +1131,6 @@ class InferenceProvider(Protocol):
         raise NotImplementedError("Batch chat completion is not implemented")
         return  # this is so mypy's safe-super rule will consider the method concrete

-    @webmethod(route="/inference/embeddings", method="POST")
     async def embeddings(
         self,
         model_id: str,
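
Note: the hunks above delete only the @webmethod(...) decorator lines; the async methods remain defined on InferenceProvider, so the Python surface stays in place while the deprecated /v1/inference HTTP routes are no longer published. The following is a minimal, hypothetical sketch of that mechanism; the webmethod and published_routes names here are illustrative stand-ins, not the repository's actual implementation.

# Hypothetical sketch: a webmethod-style decorator that only attaches route
# metadata, and a collector that publishes routes solely for decorated methods.
from typing import Any, Callable


def webmethod(route: str, method: str = "POST", experimental: bool = False) -> Callable:
    def wrap(fn: Callable) -> Callable:
        fn.__webmethod__ = {"route": route, "method": method, "experimental": experimental}
        return fn
    return wrap


def published_routes(cls: type) -> list[str]:
    # Only methods carrying __webmethod__ metadata would be wired into the HTTP router.
    return [fn.__webmethod__["route"] for fn in vars(cls).values() if hasattr(fn, "__webmethod__")]


class Before:
    @webmethod(route="/inference/completion", method="POST")
    async def completion(self, model_id: str, content: str) -> Any: ...


class After:
    # Decorator removed, as in this commit: the method is still defined and callable,
    # but no route is generated for it.
    async def completion(self, model_id: str, content: str) -> Any: ...


print(published_routes(Before))  # ['/inference/completion']
print(published_routes(After))   # []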
@@ -1,76 +0,0 @@ (deleted file)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


import pytest

from ..test_cases.test_case import TestCase


def skip_if_provider_doesnt_support_batch_inference(client_with_models, model_id):
    models = {m.identifier: m for m in client_with_models.models.list()}
    models.update({m.provider_resource_id: m for m in client_with_models.models.list()})
    provider_id = models[model_id].provider_id
    providers = {p.provider_id: p for p in client_with_models.providers.list()}
    provider = providers[provider_id]
    if provider.provider_type not in ("inline::meta-reference",):
        pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support batch inference")


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:batch_completion",
    ],
)
def test_batch_completion_non_streaming(client_with_models, text_model_id, test_case):
    skip_if_provider_doesnt_support_batch_inference(client_with_models, text_model_id)
    tc = TestCase(test_case)

    content_batch = tc["contents"]
    response = client_with_models.inference.batch_completion(
        content_batch=content_batch,
        model_id=text_model_id,
        sampling_params={
            "max_tokens": 50,
        },
    )
    assert len(response.batch) == len(content_batch)
    for i, r in enumerate(response.batch):
        print(f"response {i}: {r.content}")
        assert len(r.content) > 10


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:batch_completion",
    ],
)
def test_batch_chat_completion_non_streaming(client_with_models, text_model_id, test_case):
    skip_if_provider_doesnt_support_batch_inference(client_with_models, text_model_id)
    tc = TestCase(test_case)
    qa_pairs = tc["qa_pairs"]

    message_batch = [
        [
            {
                "role": "user",
                "content": qa["question"],
            }
        ]
        for qa in qa_pairs
    ]

    response = client_with_models.inference.batch_chat_completion(
        messages_batch=message_batch,
        model_id=text_model_id,
    )
    assert len(response.batch) == len(qa_pairs)
    for i, r in enumerate(response.batch):
        print(f"response {i}: {r.completion_message.content}")
        assert len(r.completion_message.content) > 0
        assert qa_pairs[i]["answer"].lower() in r.completion_message.content.lower()
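
Note: the skip helper above indexes models under both their stack identifier and their provider_resource_id, so either spelling of model_id resolves to the owning provider. A minimal sketch with hypothetical stand-in objects (the real ones come from client_with_models.models.list()):

# Hypothetical stand-ins, illustrating the two-key model lookup used by the skip helpers.
from dataclasses import dataclass


@dataclass
class FakeModel:
    identifier: str
    provider_resource_id: str
    provider_id: str


model_list = [FakeModel("meta-llama/Llama-3.1-8B-Instruct", "llama3.1-8b", "provider-0")]

# Same two-key indexing as skip_if_provider_doesnt_support_batch_inference above.
models = {m.identifier: m for m in model_list}
models.update({m.provider_resource_id: m for m in model_list})

# Both spellings resolve to the same provider.
assert models["meta-llama/Llama-3.1-8B-Instruct"].provider_id == "provider-0"
assert models["llama3.1-8b"].provider_id == "provider-0"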
@@ -1,303 +0,0 @@ (deleted file)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


#
# Test plan:
#
# Types of input:
#  - array of a string
#  - array of an image (ImageContentItem, either URL or base64 string)
#  - array of a text (TextContentItem)
# Types of output:
#  - list of list of floats
# Params:
#  - text_truncation
#    - absent w/ long text -> error
#    - none w/ long text -> error
#    - absent w/ short text -> ok
#    - none w/ short text -> ok
#    - end w/ long text -> ok
#    - end w/ short text -> ok
#    - start w/ long text -> ok
#    - start w/ short text -> ok
#  - output_dimension
#    - response dimension matches
#  - task_type, only for asymmetric models
#    - query embedding != passage embedding
# Negative:
#  - long string
#  - long text
#
# Todo:
#  - negative tests
#    - empty
#      - empty list
#      - empty string
#      - empty text
#      - empty image
#    - long
#      - large image
#      - appropriate combinations
#    - batch size
#      - many inputs
#    - invalid
#      - invalid URL
#      - invalid base64
#
# Notes:
#  - use llama_stack_client fixture
#  - use pytest.mark.parametrize when possible
#  - no accuracy tests: only check the type of output, not the content
#

import pytest
from llama_stack_client import BadRequestError as LlamaStackBadRequestError
from llama_stack_client.types import EmbeddingsResponse
from llama_stack_client.types.shared.interleaved_content import (
    ImageContentItem,
    ImageContentItemImage,
    ImageContentItemImageURL,
    TextContentItem,
)
from openai import BadRequestError as OpenAIBadRequestError

from llama_stack.core.library_client import LlamaStackAsLibraryClient

DUMMY_STRING = "hello"
DUMMY_STRING2 = "world"
DUMMY_LONG_STRING = "NVDA " * 10240
DUMMY_TEXT = TextContentItem(text=DUMMY_STRING, type="text")
DUMMY_TEXT2 = TextContentItem(text=DUMMY_STRING2, type="text")
DUMMY_LONG_TEXT = TextContentItem(text=DUMMY_LONG_STRING, type="text")
# TODO(mf): add a real image URL and base64 string
DUMMY_IMAGE_URL = ImageContentItem(
    image=ImageContentItemImage(url=ImageContentItemImageURL(uri="https://example.com/image.jpg")), type="image"
)
DUMMY_IMAGE_BASE64 = ImageContentItem(image=ImageContentItemImage(data="base64string"), type="image")
SUPPORTED_PROVIDERS = {"remote::nvidia"}
MODELS_SUPPORTING_MEDIA = {}
MODELS_SUPPORTING_OUTPUT_DIMENSION = {"nvidia/llama-3.2-nv-embedqa-1b-v2"}
MODELS_REQUIRING_TASK_TYPE = {
    "nvidia/llama-3.2-nv-embedqa-1b-v2",
    "nvidia/nv-embedqa-e5-v5",
    "nvidia/nv-embedqa-mistral-7b-v2",
    "snowflake/arctic-embed-l",
}
MODELS_SUPPORTING_TASK_TYPE = MODELS_REQUIRING_TASK_TYPE


def default_task_type(model_id):
    """
    Some models require a task type parameter. This provides a default value for
    testing those models.
    """
    if model_id in MODELS_REQUIRING_TASK_TYPE:
        return {"task_type": "query"}
    return {}


@pytest.mark.parametrize(
    "contents",
    [
        [DUMMY_STRING, DUMMY_STRING2],
        [DUMMY_TEXT, DUMMY_TEXT2],
    ],
    ids=[
        "list[string]",
        "list[text]",
    ],
)
def test_embedding_text(llama_stack_client, embedding_model_id, contents, inference_provider_type):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    response = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id, contents=contents, **default_task_type(embedding_model_id)
    )
    assert isinstance(response, EmbeddingsResponse)
    assert len(response.embeddings) == sum(len(content) if isinstance(content, list) else 1 for content in contents)
    assert isinstance(response.embeddings[0], list)
    assert isinstance(response.embeddings[0][0], float)


@pytest.mark.parametrize(
    "contents",
    [
        [DUMMY_IMAGE_URL, DUMMY_IMAGE_BASE64],
        [DUMMY_IMAGE_URL, DUMMY_STRING, DUMMY_IMAGE_BASE64, DUMMY_TEXT],
    ],
    ids=[
        "list[url,base64]",
        "list[url,string,base64,text]",
    ],
)
def test_embedding_image(llama_stack_client, embedding_model_id, contents, inference_provider_type):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    if embedding_model_id not in MODELS_SUPPORTING_MEDIA:
        pytest.xfail(f"{embedding_model_id} doesn't support media")
    response = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id, contents=contents, **default_task_type(embedding_model_id)
    )
    assert isinstance(response, EmbeddingsResponse)
    assert len(response.embeddings) == sum(len(content) if isinstance(content, list) else 1 for content in contents)
    assert isinstance(response.embeddings[0], list)
    assert isinstance(response.embeddings[0][0], float)


@pytest.mark.parametrize(
    "text_truncation",
    [
        "end",
        "start",
    ],
)
@pytest.mark.parametrize(
    "contents",
    [
        [DUMMY_LONG_TEXT],
        [DUMMY_STRING],
    ],
    ids=[
        "long",
        "short",
    ],
)
def test_embedding_truncation(
    llama_stack_client, embedding_model_id, text_truncation, contents, inference_provider_type
):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    response = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id,
        contents=contents,
        text_truncation=text_truncation,
        **default_task_type(embedding_model_id),
    )
    assert isinstance(response, EmbeddingsResponse)
    assert len(response.embeddings) == 1
    assert isinstance(response.embeddings[0], list)
    assert isinstance(response.embeddings[0][0], float)


@pytest.mark.parametrize(
    "text_truncation",
    [
        None,
        "none",
    ],
)
@pytest.mark.parametrize(
    "contents",
    [
        [DUMMY_LONG_TEXT],
        [DUMMY_LONG_STRING],
    ],
    ids=[
        "long-text",
        "long-str",
    ],
)
def test_embedding_truncation_error(
    llama_stack_client, embedding_model_id, text_truncation, contents, inference_provider_type
):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    # Using LlamaStackClient from llama_stack_client will raise llama_stack_client.BadRequestError
    # While using LlamaStackAsLibraryClient from llama_stack.distribution.library_client will raise the error that the backend raises
    error_type = (
        OpenAIBadRequestError
        if isinstance(llama_stack_client, LlamaStackAsLibraryClient)
        else LlamaStackBadRequestError
    )
    with pytest.raises(error_type):
        llama_stack_client.inference.embeddings(
            model_id=embedding_model_id,
            contents=[DUMMY_LONG_TEXT],
            text_truncation=text_truncation,
            **default_task_type(embedding_model_id),
        )


def test_embedding_output_dimension(llama_stack_client, embedding_model_id, inference_provider_type):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    if embedding_model_id not in MODELS_SUPPORTING_OUTPUT_DIMENSION:
        pytest.xfail(f"{embedding_model_id} doesn't support output_dimension")
    base_response = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id, contents=[DUMMY_STRING], **default_task_type(embedding_model_id)
    )
    test_response = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id,
        contents=[DUMMY_STRING],
        **default_task_type(embedding_model_id),
        output_dimension=32,
    )
    assert len(base_response.embeddings[0]) != len(test_response.embeddings[0])
    assert len(test_response.embeddings[0]) == 32


def test_embedding_task_type(llama_stack_client, embedding_model_id, inference_provider_type):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    if embedding_model_id not in MODELS_SUPPORTING_TASK_TYPE:
        pytest.xfail(f"{embedding_model_id} doesn't support task_type")
    query_embedding = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id, contents=[DUMMY_STRING], task_type="query"
    )
    document_embedding = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id, contents=[DUMMY_STRING], task_type="document"
    )
    assert query_embedding.embeddings != document_embedding.embeddings


@pytest.mark.parametrize(
    "text_truncation",
    [
        None,
        "none",
        "end",
        "start",
    ],
)
def test_embedding_text_truncation(llama_stack_client, embedding_model_id, text_truncation, inference_provider_type):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    response = llama_stack_client.inference.embeddings(
        model_id=embedding_model_id,
        contents=[DUMMY_STRING],
        text_truncation=text_truncation,
        **default_task_type(embedding_model_id),
    )
    assert isinstance(response, EmbeddingsResponse)
    assert len(response.embeddings) == 1
    assert isinstance(response.embeddings[0], list)
    assert isinstance(response.embeddings[0][0], float)


@pytest.mark.parametrize(
    "text_truncation",
    [
        "NONE",
        "END",
        "START",
        "left",
        "right",
    ],
)
def test_embedding_text_truncation_error(
    llama_stack_client, embedding_model_id, text_truncation, inference_provider_type
):
    if inference_provider_type not in SUPPORTED_PROVIDERS:
        pytest.xfail(f"{inference_provider_type} doesn't support embedding model yet")
    error_type = ValueError if isinstance(llama_stack_client, LlamaStackAsLibraryClient) else LlamaStackBadRequestError
    with pytest.raises(error_type):
        llama_stack_client.inference.embeddings(
            model_id=embedding_model_id,
            contents=[DUMMY_STRING],
            text_truncation=text_truncation,
            **default_task_type(embedding_model_id),
        )
@@ -1,543 +0,0 @@ (deleted file)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.


from time import sleep

import pytest
from pydantic import BaseModel

from llama_stack.models.llama.sku_list import resolve_model

from ..test_cases.test_case import TestCase

PROVIDER_LOGPROBS_TOP_K = {"remote::together", "remote::fireworks", "remote::vllm"}


def skip_if_model_doesnt_support_completion(client_with_models, model_id):
    models = {m.identifier: m for m in client_with_models.models.list()}
    models.update({m.provider_resource_id: m for m in client_with_models.models.list()})
    provider_id = models[model_id].provider_id
    providers = {p.provider_id: p for p in client_with_models.providers.list()}
    provider = providers[provider_id]
    if (
        provider.provider_type
        in (
            "remote::openai",
            "remote::anthropic",
            "remote::gemini",
            "remote::vertexai",
            "remote::groq",
            "remote::sambanova",
        )
        or "openai-compat" in provider.provider_type
    ):
        pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support completion")


def skip_if_model_doesnt_support_json_schema_structured_output(client_with_models, model_id):
    models = {m.identifier: m for m in client_with_models.models.list()}
    models.update({m.provider_resource_id: m for m in client_with_models.models.list()})
    provider_id = models[model_id].provider_id
    providers = {p.provider_id: p for p in client_with_models.providers.list()}
    provider = providers[provider_id]
    if provider.provider_type in ("remote::sambanova",):
        pytest.skip(
            f"Model {model_id} hosted by {provider.provider_type} doesn't support json_schema structured output"
        )


def get_llama_model(client_with_models, model_id):
    models = {}
    for m in client_with_models.models.list():
        models[m.identifier] = m
        models[m.provider_resource_id] = m

    assert model_id in models, f"Model {model_id} not found"

    model = models[model_id]
    ids = (model.identifier, model.provider_resource_id)
    for mid in ids:
        if resolve_model(mid):
            return mid

    return model.metadata.get("llama_model", None)


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:sanity",
    ],
)
def test_text_completion_non_streaming(client_with_models, text_model_id, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    tc = TestCase(test_case)

    response = client_with_models.inference.completion(
        content=tc["content"],
        stream=False,
        model_id=text_model_id,
        sampling_params={
            "max_tokens": 50,
        },
    )
    assert len(response.content) > 10
    # assert "blue" in response.content.lower().strip()


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:sanity",
    ],
)
def test_text_completion_streaming(client_with_models, text_model_id, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    tc = TestCase(test_case)

    response = client_with_models.inference.completion(
        content=tc["content"],
        stream=True,
        model_id=text_model_id,
        sampling_params={
            "max_tokens": 50,
        },
    )
    streamed_content = [chunk.delta for chunk in response]
    content_str = "".join(streamed_content).lower().strip()
    # assert "blue" in content_str
    assert len(content_str) > 10


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:stop_sequence",
    ],
)
def test_text_completion_stop_sequence(client_with_models, text_model_id, inference_provider_type, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    # This is only supported/tested for remote vLLM: https://github.com/meta-llama/llama-stack/issues/1771
    if inference_provider_type != "remote::vllm":
        pytest.xfail(f"{inference_provider_type} doesn't support 'stop' parameter yet")
    tc = TestCase(test_case)

    response = client_with_models.inference.completion(
        content=tc["content"],
        stream=True,
        model_id=text_model_id,
        sampling_params={
            "max_tokens": 50,
            "stop": ["1963"],
        },
    )
    streamed_content = [chunk.delta for chunk in response]
    content_str = "".join(streamed_content).lower().strip()
    assert "1963" not in content_str


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:log_probs",
    ],
)
def test_text_completion_log_probs_non_streaming(client_with_models, text_model_id, inference_provider_type, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    if inference_provider_type not in PROVIDER_LOGPROBS_TOP_K:
        pytest.xfail(f"{inference_provider_type} doesn't support log probs yet")

    tc = TestCase(test_case)

    response = client_with_models.inference.completion(
        content=tc["content"],
        stream=False,
        model_id=text_model_id,
        sampling_params={
            "max_tokens": 5,
        },
        logprobs={
            "top_k": 1,
        },
    )
    assert response.logprobs, "Logprobs should not be empty"
    assert 1 <= len(response.logprobs) <= 5  # each token has 1 logprob and here max_tokens=5
    assert all(len(logprob.logprobs_by_token) == 1 for logprob in response.logprobs)


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:log_probs",
    ],
)
def test_text_completion_log_probs_streaming(client_with_models, text_model_id, inference_provider_type, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
    if inference_provider_type not in PROVIDER_LOGPROBS_TOP_K:
        pytest.xfail(f"{inference_provider_type} doesn't support log probs yet")

    tc = TestCase(test_case)

    response = client_with_models.inference.completion(
        content=tc["content"],
        stream=True,
        model_id=text_model_id,
        sampling_params={
            "max_tokens": 5,
        },
        logprobs={
            "top_k": 1,
        },
    )
    streamed_content = list(response)
    for chunk in streamed_content:
        if chunk.delta:  # if there's a token, we expect logprobs
            assert chunk.logprobs, "Logprobs should not be empty"
            assert all(len(logprob.logprobs_by_token) == 1 for logprob in chunk.logprobs)
        else:  # no token, no logprobs
            assert not chunk.logprobs, "Logprobs should be empty"


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:completion:structured_output",
    ],
)
def test_text_completion_structured_output(client_with_models, text_model_id, test_case):
    skip_if_model_doesnt_support_completion(client_with_models, text_model_id)

    class AnswerFormat(BaseModel):
        name: str
        year_born: str
        year_retired: str

    tc = TestCase(test_case)

    user_input = tc["user_input"]
    response = client_with_models.inference.completion(
        model_id=text_model_id,
        content=user_input,
        stream=False,
        sampling_params={
            "max_tokens": 50,
        },
        response_format={
            "type": "json_schema",
            "json_schema": AnswerFormat.model_json_schema(),
        },
    )
    answer = AnswerFormat.model_validate_json(response.content)
    expected = tc["expected"]
    assert answer.name == expected["name"]
    assert answer.year_born == expected["year_born"]
    assert answer.year_retired == expected["year_retired"]


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:non_streaming_01",
        "inference:chat_completion:non_streaming_02",
    ],
)
def test_text_chat_completion_non_streaming(client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)
    question = tc["question"]
    expected = tc["expected"]

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=[
            {
                "role": "user",
                "content": question,
            }
        ],
        stream=False,
    )
    message_content = response.completion_message.content.lower().strip()
    assert len(message_content) > 0
    assert expected.lower() in message_content


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:streaming_01",
        "inference:chat_completion:streaming_02",
    ],
)
def test_text_chat_completion_streaming(client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)
    question = tc["question"]
    expected = tc["expected"]

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=[{"role": "user", "content": question}],
        stream=True,
        timeout=120,  # Increase timeout to 2 minutes for large conversation history
    )
    streamed_content = [str(chunk.event.delta.text.lower().strip()) for chunk in response]
    assert len(streamed_content) > 0
    assert expected.lower() in "".join(streamed_content)


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:tool_calling",
    ],
)
def test_text_chat_completion_with_tool_calling_and_non_streaming(client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=tc["messages"],
        tools=tc["tools"],
        tool_choice="auto",
        stream=False,
    )
    # some models can return content for the response in addition to the tool call
    assert response.completion_message.role == "assistant"

    assert len(response.completion_message.tool_calls) == 1
    assert response.completion_message.tool_calls[0].tool_name == tc["tools"][0]["tool_name"]
    assert response.completion_message.tool_calls[0].arguments == tc["expected"]


# Extracts streamed text and separates it from tool invocation content.
# The returned tool invocation content is a string, so it's easy to compare with the expected value,
# e.g. "[get_weather, {'location': 'San Francisco, CA'}]"
def extract_tool_invocation_content(response):
    tool_invocation_content: str = ""
    for chunk in response:
        delta = chunk.event.delta
        if delta.type == "tool_call" and delta.parse_status == "succeeded":
            call = delta.tool_call
            tool_invocation_content += f"[{call.tool_name}, {call.arguments}]"
    return tool_invocation_content


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:tool_calling",
    ],
)
def test_text_chat_completion_with_tool_calling_and_streaming(client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=tc["messages"],
        tools=tc["tools"],
        tool_choice="auto",
        stream=True,
    )
    tool_invocation_content = extract_tool_invocation_content(response)
    expected_tool_name = tc["tools"][0]["tool_name"]
    expected_argument = tc["expected"]
    assert tool_invocation_content == f"[{expected_tool_name}, {expected_argument}]"


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:tool_calling",
    ],
)
def test_text_chat_completion_with_tool_choice_required(client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=tc["messages"],
        tools=tc["tools"],
        tool_config={
            "tool_choice": "required",
        },
        stream=True,
    )
    tool_invocation_content = extract_tool_invocation_content(response)
    expected_tool_name = tc["tools"][0]["tool_name"]
    expected_argument = tc["expected"]
    assert tool_invocation_content == f"[{expected_tool_name}, {expected_argument}]"


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:tool_calling",
    ],
)
def test_text_chat_completion_with_tool_choice_none(client_with_models, text_model_id, test_case):
    tc = TestCase(test_case)

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=tc["messages"],
        tools=tc["tools"],
        tool_config={"tool_choice": "none"},
        stream=True,
    )
    tool_invocation_content = extract_tool_invocation_content(response)
    assert tool_invocation_content == ""


@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:structured_output",
    ],
)
def test_text_chat_completion_structured_output(client_with_models, text_model_id, test_case):
    skip_if_model_doesnt_support_json_schema_structured_output(client_with_models, text_model_id)

    class NBAStats(BaseModel):
        year_for_draft: int
        num_seasons_in_nba: int

    class AnswerFormat(BaseModel):
        first_name: str
        last_name: str
        year_of_birth: int
        nba_stats: NBAStats

    tc = TestCase(test_case)

    response = client_with_models.inference.chat_completion(
        model_id=text_model_id,
        messages=tc["messages"],
        response_format={
            "type": "json_schema",
            "json_schema": AnswerFormat.model_json_schema(),
        },
        stream=False,
    )
    answer = AnswerFormat.model_validate_json(response.completion_message.content)
    expected = tc["expected"]
    assert answer.first_name == expected["first_name"]
    assert answer.last_name == expected["last_name"]
    assert answer.year_of_birth == expected["year_of_birth"]
    assert answer.nba_stats.num_seasons_in_nba == expected["num_seasons_in_nba"]
    assert answer.nba_stats.year_for_draft == expected["year_for_draft"]


@pytest.mark.parametrize("streaming", [True, False])
@pytest.mark.parametrize(
    "test_case",
    [
        "inference:chat_completion:tool_calling_tools_absent",
    ],
)
def test_text_chat_completion_tool_calling_tools_not_in_request(
    client_with_models, text_model_id, test_case, streaming
):
    tc = TestCase(test_case)

    # TODO: more dynamic lookup on tool_prompt_format for model family
    tool_prompt_format = "json" if "3.1" in text_model_id else "python_list"
    request = {
        "model_id": text_model_id,
        "messages": tc["messages"],
        "tools": tc["tools"],
        "tool_choice": "auto",
        "tool_prompt_format": tool_prompt_format,
        "stream": streaming,
    }

    response = client_with_models.inference.chat_completion(**request)

    if streaming:
        for chunk in response:
            delta = chunk.event.delta
            if delta.type == "tool_call" and delta.parse_status == "succeeded":
                assert delta.tool_call.tool_name == "get_object_namespace_list"
            if delta.type == "tool_call" and delta.parse_status == "failed":
                # expect raw message that failed to parse in tool_call
                assert isinstance(delta.tool_call, str)
                assert len(delta.tool_call) > 0
    else:
        for tc in response.completion_message.tool_calls:
            assert tc.tool_name == "get_object_namespace_list"


@pytest.mark.parametrize(
    "test_case",
    [
        # Tests if the model can handle simple messages like "Hi" or
        # a message unrelated to one of the tool calls
        "inference:chat_completion:text_then_tool",
        # Tests if the model can do full tool call with responses correctly
        "inference:chat_completion:tool_then_answer",
        # Tests if model can generate multiple params and
        # read outputs correctly
        "inference:chat_completion:array_parameter",
    ],
)
def test_text_chat_completion_with_multi_turn_tool_calling(client_with_models, text_model_id, test_case):
    """This test exercises the model's tool calling loop in various scenarios"""
    if "llama-4" not in text_model_id.lower() and "llama4" not in text_model_id.lower():
        pytest.xfail("Not tested for non-llama4 models yet")

    tc = TestCase(test_case)
    messages = []

    # keep going until either
    # 1. we have messages to test in multi-turn
    # 2. no messages left but the last message is a tool response
    while len(tc["messages"]) > 0 or (len(messages) > 0 and messages[-1]["role"] == "tool"):
        # do not take new messages if last message is tool response
        if len(messages) == 0 or messages[-1]["role"] != "tool":
            new_messages = tc["messages"].pop(0)
            messages += new_messages

        # pprint(messages)
        response = client_with_models.inference.chat_completion(
            model_id=text_model_id,
            messages=messages,
            tools=tc["tools"],
            stream=False,
            sampling_params={
                "strategy": {
                    "type": "top_p",
                    "top_p": 0.9,
                    "temperature": 0.6,
                }
            },
        )
        op_msg = response.completion_message
        messages.append(op_msg.model_dump())
        # print(op_msg)

        assert op_msg.role == "assistant"
        expected = tc["expected"].pop(0)
        assert len(op_msg.tool_calls) == expected["num_tool_calls"]

        if expected["num_tool_calls"] > 0:
            assert op_msg.tool_calls[0].tool_name == expected["tool_name"]
            assert op_msg.tool_calls[0].arguments == expected["tool_arguments"]

            tool_response = tc["tool_responses"].pop(0)
            messages.append(
                # Tool Response Message
                {
                    "role": "tool",
                    "call_id": op_msg.tool_calls[0].call_id,
                    "content": tool_response["response"],
                }
            )
        else:
            actual_answer = op_msg.content.lower()
            # pprint(actual_answer)
            assert expected["answer"] in actual_answer

        # sleep to avoid rate limit
        sleep(1)

(Remainder of this file's diff was not loaded in this view.)
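
Note: the extract_tool_invocation_content helper in the deleted file above concatenates only successfully parsed tool-call deltas into a "[tool_name, {arguments}]" string. A small sketch driving a copy of it with fabricated chunks (the SimpleNamespace objects below are hypothetical stand-ins for the client's streaming chunk types) shows the output shape:

# Sketch: exercise a copy of the helper with fake streaming chunks.
from types import SimpleNamespace


def extract_tool_invocation_content(response):
    tool_invocation_content: str = ""
    for chunk in response:
        delta = chunk.event.delta
        if delta.type == "tool_call" and delta.parse_status == "succeeded":
            call = delta.tool_call
            tool_invocation_content += f"[{call.tool_name}, {call.arguments}]"
    return tool_invocation_content


fake_chunks = [
    # A plain text delta is ignored by the helper.
    SimpleNamespace(event=SimpleNamespace(delta=SimpleNamespace(type="text", parse_status=None, tool_call=None))),
    # A successfully parsed tool call is rendered as "[tool_name, {arguments}]".
    SimpleNamespace(
        event=SimpleNamespace(
            delta=SimpleNamespace(
                type="tool_call",
                parse_status="succeeded",
                tool_call=SimpleNamespace(tool_name="get_weather", arguments={"location": "San Francisco, CA"}),
            )
        )
    ),
]

print(extract_tool_invocation_content(fake_chunks))
# [get_weather, {'location': 'San Francisco, CA'}]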