Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-29 11:24:19 +00:00
[bugfix] fix inference sdk test for v1 (#775)
# What does this PR do?

- fixes client sdk tests

## Test Plan

```
LLAMA_STACK_BASE_URL="http://localhost:5000" pytest -v tests/client-sdk/inference/test_inference.py
```

<img width="1359" alt="image" src="https://github.com/user-attachments/assets/a720e0ca-c441-465e-bc6b-9b98091afa23" />

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
This commit is contained in:
parent 67450e4024, commit 3e518c049a

1 changed file with 17 additions and 21 deletions (tests/client-sdk/inference/test_inference.py)
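The Test Plan runs the client SDK tests against a stack already serving on port 5000. As a rough illustration only (not code from this commit), the sketch below shows how a session-scoped pytest fixture could build the client from `LLAMA_STACK_BASE_URL`; the fixture body and the localhost fallback are assumptions.

```python
# Hypothetical fixture sketch: construct the client-sdk test client from
# LLAMA_STACK_BASE_URL. The fallback default and fixture body are assumed,
# not copied from this repository.
import os

import pytest
from llama_stack_client import LlamaStackClient


@pytest.fixture(scope="session")
def llama_stack_client():
    base_url = os.environ.get("LLAMA_STACK_BASE_URL", "http://localhost:5000")
    return LlamaStackClient(base_url=base_url)
```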
```diff
@@ -28,19 +28,18 @@ def provider_tool_format(inference_provider_type):
 
 @pytest.fixture(scope="session")
 def inference_provider_type(llama_stack_client):
     providers = llama_stack_client.providers.list()
-    if "inference" not in providers:
-        pytest.fail("No inference providers available")
-    assert len(providers["inference"]) > 0
-    return providers["inference"][0].provider_type
+    assert len(providers.inference) > 0
+    return providers.inference[0]["provider_type"]
 
 
 @pytest.fixture(scope="session")
 def text_model_id(llama_stack_client):
     available_models = [
         model.identifier
-        for model in llama_stack_client.models.list()
+        for model in llama_stack_client.models.list().data
         if model.identifier.startswith("meta-llama") and "405" not in model.identifier
     ]
+    print(available_models)
     assert len(available_models) > 0
     return available_models[0]
@@ -49,7 +48,7 @@ def text_model_id(llama_stack_client):
 def vision_model_id(llama_stack_client):
     available_models = [
         model.identifier
-        for model in llama_stack_client.models.list()
+        for model in llama_stack_client.models.list().data
         if "vision" in model.identifier.lower()
     ]
     if len(available_models) == 0:
```
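Both fixture changes track the v1 client response shapes: `providers.list()` now returns an object whose `inference` field is a list of provider entries, and `models.list()` wraps its items in a `.data` field. A minimal sketch of the same access pattern, assuming a stack is running locally (the base URL is an assumption):

```python
# Sketch of the access pattern the updated fixtures rely on (v1 response shapes
# as shown in the diff above); the base_url is only an assumption for local use.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:5000")

providers = client.providers.list()
assert len(providers.inference) > 0
provider_type = providers.inference[0]["provider_type"]

text_models = [
    model.identifier
    for model in client.models.list().data
    if model.identifier.startswith("meta-llama") and "405" not in model.identifier
]
print(provider_type, text_models)
```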
```diff
@@ -245,19 +244,13 @@ def test_text_chat_completion_with_tool_calling_and_non_streaming(
 # The returned tool inovcation content will be a string so it's easy to comapare with expected value
 # e.g. "[get_weather, {'location': 'San Francisco, CA'}]"
 def extract_tool_invocation_content(response):
-    text_content: str = ""
     tool_invocation_content: str = ""
     for chunk in response:
         delta = chunk.event.delta
-        if delta.type == "text":
-            text_content += delta.text
-        elif delta.type == "tool_call":
-            if isinstance(delta.content, str):
-                tool_invocation_content += delta.content
-            else:
-                call = delta.content
-                tool_invocation_content += f"[{call.tool_name}, {call.arguments}]"
-    return text_content, tool_invocation_content
+        if delta.type == "tool_call" and delta.parse_status == "succeeded":
+            call = delta.content
+            tool_invocation_content += f"[{call.tool_name}, {call.arguments}]"
+    return tool_invocation_content
 
 
 def test_text_chat_completion_with_tool_calling_and_streaming(
@@ -274,8 +267,11 @@ def test_text_chat_completion_with_tool_calling_and_streaming(
         tool_prompt_format=provider_tool_format,
         stream=True,
     )
-    text_content, tool_invocation_content = extract_tool_invocation_content(response)
-
+    tool_invocation_content = extract_tool_invocation_content(response)
+    print(
+        "!!!!tool_invocation_content",
+        tool_invocation_content,
+    )
     assert tool_invocation_content == "[get_weather, {'location': 'San Francisco, CA'}]"
```
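The rewritten helper only accumulates `tool_call` deltas whose `parse_status` is `"succeeded"`, and returns a single string instead of the old `(text_content, tool_invocation_content)` pair. A self-contained sketch of that behavior using stub chunk objects (the dataclasses below are illustrative stand-ins, not the SDK's actual models):

```python
# Stub objects standing in for the SDK's streaming chunk types (illustrative only).
from dataclasses import dataclass


@dataclass
class ToolCall:
    tool_name: str
    arguments: dict


@dataclass
class Delta:
    type: str
    parse_status: str = ""
    content: object = None


@dataclass
class Event:
    delta: Delta


@dataclass
class Chunk:
    event: Event


def extract_tool_invocation_content(response):
    # Mirrors the rewritten helper: keep only successfully parsed tool_call deltas.
    tool_invocation_content: str = ""
    for chunk in response:
        delta = chunk.event.delta
        if delta.type == "tool_call" and delta.parse_status == "succeeded":
            call = delta.content
            tool_invocation_content += f"[{call.tool_name}, {call.arguments}]"
    return tool_invocation_content


chunks = [
    Chunk(Event(Delta(type="text", content="Looking that up..."))),
    Chunk(Event(Delta(type="tool_call", parse_status="in_progress"))),
    Chunk(Event(Delta(
        type="tool_call",
        parse_status="succeeded",
        content=ToolCall("get_weather", {"location": "San Francisco, CA"}),
    ))),
]
assert extract_tool_invocation_content(chunks) == "[get_weather, {'location': 'San Francisco, CA'}]"
```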
```diff
@@ -362,8 +358,8 @@ def test_image_chat_completion_streaming(llama_stack_client, vision_model_id):
         messages=[message],
         stream=True,
     )
-    streamed_content = [
-        str(chunk.event.delta.text.lower().strip()) for chunk in response
-    ]
+    streamed_content = ""
+    for chunk in response:
+        streamed_content += chunk.event.delta.text.lower()
     assert len(streamed_content) > 0
     assert any(expected in streamed_content for expected in {"dog", "puppy", "pup"})
```
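Accumulating the deltas into one string also changes what the final assertion checks: with a list, `expected in streamed_content` is an exact match against whole chunks, while with a string it is a substring match, so "dog" can be found even when it arrives embedded in a longer chunk or split across chunks. A small illustration of that difference (the sample chunk texts are made up):

```python
# Membership semantics behind the change (sample chunk texts are invented).
chunks = ["The image shows a ", "small dog on the grass."]

# Old style: a list of per-chunk strings; "dog" must equal an entire chunk to match.
as_list = [chunk.lower().strip() for chunk in chunks]
print("dog" in as_list)  # False

# New style: one accumulated string; "dog" matches as a substring anywhere.
as_string = "".join(chunk.lower() for chunk in chunks)
print("dog" in as_string)  # True
```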