# What does this PR do?

- As the title says: clean up `import *`'s (a before/after sketch follows this description)
- Upgrade tests to make them more robust to bad model outputs (also illustrated below)
- Remove `import *`'s in `llama_stack/apis/*` (skipping `__init__` modules)

<img width="465" alt="image" src="https://github.com/user-attachments/assets/d8339c13-3b40-4ba5-9c53-0d2329726ee2" />

- Ran `sh run_openapi_generator.sh`; no types are affected

## Test Plan

### Providers Tests

**agents**

```
pytest -v -s llama_stack/providers/tests/agents/test_agents.py -m "together" --safety-shield meta-llama/Llama-Guard-3-8B --inference-model meta-llama/Llama-3.1-405B-Instruct-FP8
```

**inference**

```bash
# meta-reference
torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py
torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py

# together
pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py
pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py

pytest ./llama_stack/providers/tests/inference/test_prompt_adapter.py
```

**safety**

```
pytest -v -s llama_stack/providers/tests/safety/test_safety.py -m together --safety-shield meta-llama/Llama-Guard-3-8B
```

**memory**

```
pytest -v -s llama_stack/providers/tests/memory/test_memory.py -m "sentence_transformers" --env EMBEDDING_DIMENSION=384
```

**scoring**

```
pytest -v -s -m llm_as_judge_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py --judge-model meta-llama/Llama-3.2-3B-Instruct
pytest -v -s -m basic_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py
pytest -v -s -m braintrust_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py
```

**datasetio**

```
pytest -v -s -m localfs llama_stack/providers/tests/datasetio/test_datasetio.py
pytest -v -s -m huggingface llama_stack/providers/tests/datasetio/test_datasetio.py
```

**eval**

```
pytest -v -s -m meta_reference_eval_together_inference llama_stack/providers/tests/eval/test_eval.py
pytest -v -s -m meta_reference_eval_together_inference_huggingface_datasetio llama_stack/providers/tests/eval/test_eval.py
```

### Client-SDK Tests

```
LLAMA_STACK_BASE_URL=http://localhost:5000 pytest -v ./tests/client-sdk
```

### llama-stack-apps

```
PORT=5000 LOCALHOST=localhost

python -m examples.agents.hello $LOCALHOST $PORT
python -m examples.agents.inflation $LOCALHOST $PORT
python -m examples.agents.podcast_transcript $LOCALHOST $PORT
python -m examples.agents.rag_as_attachments $LOCALHOST $PORT
python -m examples.agents.rag_with_memory_bank $LOCALHOST $PORT
python -m examples.safety.llama_guard_demo_mm $LOCALHOST $PORT
python -m examples.agents.e2e_loop_with_custom_tools $LOCALHOST $PORT

# Vision model
python -m examples.interior_design_assistant.app
python -m examples.agent_store.app $LOCALHOST $PORT
```

### CLI

```
which llama
llama model prompt-format -m Llama3.2-11B-Vision-Instruct
llama model list
llama stack list-apis
llama stack list-providers inference
llama stack build --template ollama --image-type conda
```

### Distributions Tests

**ollama**

```
llama stack build --template ollama --image-type conda
ollama run llama3.2:1b-instruct-fp16
llama stack run ./llama_stack/templates/ollama/run.yaml --env INFERENCE_MODEL=meta-llama/Llama-3.2-1B-Instruct
```

**fireworks**

```
llama stack build --template fireworks --image-type conda
llama stack run ./llama_stack/templates/fireworks/run.yaml
```

**together**

```
llama stack build --template together --image-type conda
llama stack run ./llama_stack/templates/together/run.yaml
```

**tgi**

```
llama stack run ./llama_stack/templates/tgi/run.yaml --env TGI_URL=http://0.0.0.0:5009 --env INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
```

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
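The import cleanup follows a simple mechanical pattern. A minimal before/after sketch (the exact modules and names vary across the diff; `llama_stack.apis.inference` is just one example, with names taken from the test file below):

```python
# Before: the wildcard pulls every public name into the namespace,
# hiding the module's real dependencies from readers and linters.
from llama_stack.apis.inference import *  # noqa: F403

# After: the dependencies are spelled out explicitly.
from llama_stack.apis.inference import (
    ChatCompletionRequest,
    SystemMessage,
    UserMessage,
)
```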
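On test robustness, a hypothetical illustration of the general idea (not code from this diff): assert on stable facts in a model's output rather than on exact strings, so harmless wording drift does not fail the suite.

```python
# Hypothetical helper, not from this PR: tolerate phrasing changes
# by checking that the key fact appears somewhere in the output.
def contains_key_fact(response_text: str) -> bool:
    return "Paris" in response_text

assert contains_key_fact("The capital of France is Paris.")
assert contains_key_fact("Paris is the capital.")
# A brittle check like `response_text == "Paris"` would fail both cases above.
```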
llama_stack/providers/tests/inference/test_prompt_adapter.py (134 lines, 4.8 KiB, Python)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import unittest

from llama_models.llama3.api.datatypes import (
    BuiltinTool,
    ToolDefinition,
    ToolParamDefinition,
    ToolPromptFormat,
)

from llama_stack.apis.inference import ChatCompletionRequest, SystemMessage, UserMessage
from llama_stack.providers.utils.inference.prompt_adapter import (
    chat_completion_request_to_messages,
)

MODEL = "Llama3.1-8B-Instruct"


class PrepareMessagesTests(unittest.IsolatedAsyncioTestCase):
    async def test_system_default(self):
        # No tools and no explicit system message: the adapter prepends the
        # default system prompt (which carries the knowledge-cutoff line).
        content = "Hello !"
        request = ChatCompletionRequest(
            model=MODEL,
            messages=[
                UserMessage(content=content),
            ],
        )
        messages = chat_completion_request_to_messages(request, MODEL)
        self.assertEqual(len(messages), 2)
        self.assertEqual(messages[-1].content, content)
        self.assertTrue("Cutting Knowledge Date: December 2023" in messages[0].content)

    async def test_system_builtin_only(self):
        # Built-in tools are advertised inside the single system message.
        content = "Hello !"
        request = ChatCompletionRequest(
            model=MODEL,
            messages=[
                UserMessage(content=content),
            ],
            tools=[
                ToolDefinition(tool_name=BuiltinTool.code_interpreter),
                ToolDefinition(tool_name=BuiltinTool.brave_search),
            ],
        )
        messages = chat_completion_request_to_messages(request, MODEL)
        self.assertEqual(len(messages), 2)
        self.assertEqual(messages[-1].content, content)
        self.assertTrue("Cutting Knowledge Date: December 2023" in messages[0].content)
        self.assertTrue("Tools: brave_search" in messages[0].content)

    async def test_system_custom_only(self):
        # A custom tool rendered in JSON format yields a second system
        # message describing the function-call convention.
        content = "Hello !"
        request = ChatCompletionRequest(
            model=MODEL,
            messages=[
                UserMessage(content=content),
            ],
            tools=[
                ToolDefinition(
                    tool_name="custom1",
                    description="custom1 tool",
                    parameters={
                        "param1": ToolParamDefinition(
                            param_type="str",
                            description="param1 description",
                            required=True,
                        ),
                    },
                )
            ],
            tool_prompt_format=ToolPromptFormat.json,
        )
        messages = chat_completion_request_to_messages(request, MODEL)
        self.assertEqual(len(messages), 3)
        self.assertTrue("Environment: ipython" in messages[0].content)

        self.assertTrue("Return function calls in JSON format" in messages[1].content)
        self.assertEqual(messages[-1].content, content)

    async def test_system_custom_and_builtin(self):
        # Built-in and custom tools can be combined: built-ins land in the
        # first system message, the custom tool in the second.
        content = "Hello !"
        request = ChatCompletionRequest(
            model=MODEL,
            messages=[
                UserMessage(content=content),
            ],
            tools=[
                ToolDefinition(tool_name=BuiltinTool.code_interpreter),
                ToolDefinition(tool_name=BuiltinTool.brave_search),
                ToolDefinition(
                    tool_name="custom1",
                    description="custom1 tool",
                    parameters={
                        "param1": ToolParamDefinition(
                            param_type="str",
                            description="param1 description",
                            required=True,
                        ),
                    },
                ),
            ],
        )
        messages = chat_completion_request_to_messages(request, MODEL)
        self.assertEqual(len(messages), 3)

        self.assertTrue("Environment: ipython" in messages[0].content)
        self.assertTrue("Tools: brave_search" in messages[0].content)

        self.assertTrue("Return function calls in JSON format" in messages[1].content)
        self.assertEqual(messages[-1].content, content)

    async def test_user_provided_system_message(self):
        # A caller-supplied system message is folded into the first message,
        # with the custom text preserved at its end.
        content = "Hello !"
        system_prompt = "You are a pirate"
        request = ChatCompletionRequest(
            model=MODEL,
            messages=[
                SystemMessage(content=system_prompt),
                UserMessage(content=content),
            ],
            tools=[
                ToolDefinition(tool_name=BuiltinTool.code_interpreter),
            ],
        )
        messages = chat_completion_request_to_messages(request, MODEL)
        self.assertEqual(len(messages), 2, messages)
        self.assertTrue(messages[0].content.endswith(system_prompt))

        self.assertEqual(messages[-1].content, content)
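The test plan above runs this module through pytest; for standalone execution, the standard unittest entry point (an assumption here, it is not part of the file above) would be:

```python
# Assumed addition for direct invocation, e.g. `python test_prompt_adapter.py`;
# pytest discovery does not need it.
if __name__ == "__main__":
    unittest.main()
```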