From 1deab94ea00109c887a455993bb0746e004a1fb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 25 Apr 2025 21:16:57 +0200 Subject: [PATCH 1/8] chore: exclude test, provider, and template directories from coverage (#2028) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What does this PR do? Introduce a `.coveragerc` file to omit: - test files (*/tests/*) - provider code (*/llama_stack/providers/*) - template files (*/llama_stack/templates/*) - virtual environment (.venv/*) This ensures coverage reports focus on core application logic (API and CLI). Note: I'm opening this up for discussion as well - we might decide to ignore more and/or re-add some directories! Signed-off-by: Sébastien Han --- .coveragerc | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .coveragerc diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..e16c2e461 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,6 @@ +[run] +omit = + */tests/* + */llama_stack/providers/* + */llama_stack/templates/* + .venv/* From 0e4307de0f4fa531ac382654a082b4bc5ba3b7b1 Mon Sep 17 00:00:00 2001 From: Derek Higgins Date: Fri, 25 Apr 2025 20:17:31 +0100 Subject: [PATCH 2/8] docs: Fix missing --gpu all flag in Docker run commands (#2026) Adding the --gpu all flag to Docker run commands for the meta-reference-gpu distribution ensures models are loaded onto the GPU instead of the CPU. Remove docs for meta-reference-quantized-gpu. The distribution was removed in #1887 but these files were left behind. Fixes: #1798 # What does this PR do? Fixes the docs to add the --gpu all flag to the docker run commands [//]: # (If resolving an issue, uncomment and update the line below) Closes #1798 ## Test Plan [Describe the tests you ran to verify your changes with result summaries. 
*Provide clear instructions so the plan can be easily re-executed.*] verified in docker documentation but untested --------- Signed-off-by: Derek Higgins --- README.md | 1 - docs/source/distributions/building_distro.md | 2 - .../self_hosted_distro/meta-reference-gpu.md | 2 + .../meta-reference-quantized-gpu.md | 123 ------------------ .../meta-reference-gpu/doc_template.md | 2 + 5 files changed, 4 insertions(+), 126 deletions(-) delete mode 100644 docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md diff --git a/README.md b/README.md index c2e688763..9a4f1a849 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,6 @@ A Llama Stack Distribution (or "distro") is a pre-configured bundle of provider | **Distribution** | **Llama Stack Docker** | Start This Distribution | |:---------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------:| | Meta Reference | [llamastack/distribution-meta-reference-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/meta-reference-gpu.html) | -| Meta Reference Quantized | [llamastack/distribution-meta-reference-quantized-gpu](https://hub.docker.com/repository/docker/llamastack/distribution-meta-reference-quantized-gpu/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/meta-reference-quantized-gpu.html) | | SambaNova | [llamastack/distribution-sambanova](https://hub.docker.com/repository/docker/llamastack/distribution-sambanova/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/sambanova.html) | | Cerebras | [llamastack/distribution-cerebras](https://hub.docker.com/repository/docker/llamastack/distribution-cerebras/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/cerebras.html) | | Ollama | [llamastack/distribution-ollama](https://hub.docker.com/repository/docker/llamastack/distribution-ollama/general) | [Guide](https://llama-stack.readthedocs.io/en/latest/distributions/self_hosted_distro/ollama.html) | diff --git a/docs/source/distributions/building_distro.md b/docs/source/distributions/building_distro.md index 4c342b14b..56b8d30a8 100644 --- a/docs/source/distributions/building_distro.md +++ b/docs/source/distributions/building_distro.md @@ -109,8 +109,6 @@ llama stack build --list-templates +------------------------------+-----------------------------------------------------------------------------+ | nvidia | Use NVIDIA NIM for running LLM inference | +------------------------------+-----------------------------------------------------------------------------+ -| meta-reference-quantized-gpu | Use Meta Reference with fp8, int4 quantization for running LLM inference | -+------------------------------+-----------------------------------------------------------------------------+ | cerebras | Use Cerebras for running LLM inference | +------------------------------+-----------------------------------------------------------------------------+ | ollama | Use (an external) Ollama server for running LLM inference | diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md 
b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md index b90f75347..f58d7bbee 100644 --- a/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md +++ b/docs/source/distributions/self_hosted_distro/meta-reference-gpu.md @@ -81,6 +81,7 @@ LLAMA_STACK_PORT=8321 docker run \ -it \ --pull always \ + --gpu all \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -v ~/.llama:/root/.llama \ llamastack/distribution-meta-reference-gpu \ @@ -94,6 +95,7 @@ If you are using Llama Stack Safety / Shield APIs, use: docker run \ -it \ --pull always \ + --gpu all \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -v ~/.llama:/root/.llama \ llamastack/distribution-meta-reference-gpu \ diff --git a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md b/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md deleted file mode 100644 index c3e2b4f2c..000000000 --- a/docs/source/distributions/self_hosted_distro/meta-reference-quantized-gpu.md +++ /dev/null @@ -1,123 +0,0 @@ ---- -orphan: true ---- - -# Meta Reference Quantized Distribution - -```{toctree} -:maxdepth: 2 -:hidden: - -self -``` - -The `llamastack/distribution-meta-reference-quantized-gpu` distribution consists of the following provider configurations: - -| API | Provider(s) | -|-----|-------------| -| agents | `inline::meta-reference` | -| datasetio | `remote::huggingface`, `inline::localfs` | -| eval | `inline::meta-reference` | -| inference | `inline::meta-reference-quantized` | -| safety | `inline::llama-guard` | -| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` | -| telemetry | `inline::meta-reference` | -| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` | -| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` | - - -The only difference vs. the `meta-reference-gpu` distribution is that it has support for more efficient inference -- with fp8, int4 quantization, etc. - -Note that you need access to nvidia GPUs to run this distribution. This distribution is not compatible with CPU-only machines or machines with AMD GPUs. - -### Environment Variables - -The following environment variables can be configured: - -- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `8321`) -- `INFERENCE_MODEL`: Inference model loaded into the Meta Reference server (default: `meta-llama/Llama-3.2-3B-Instruct`) -- `INFERENCE_CHECKPOINT_DIR`: Directory containing the Meta Reference model checkpoint (default: `null`) - - -## Prerequisite: Downloading Models - -Please use `llama model list --downloaded` to check that you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/references/llama_cli_reference/download_models.html) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints. 
- -``` -$ llama model list --downloaded -┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┓ -┃ Model ┃ Size ┃ Modified Time ┃ -┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━┩ -│ Llama3.2-1B-Instruct:int4-qlora-eo8 │ 1.53 GB │ 2025-02-26 11:22:28 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama3.2-1B │ 2.31 GB │ 2025-02-18 21:48:52 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Prompt-Guard-86M │ 0.02 GB │ 2025-02-26 11:29:28 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama3.2-3B-Instruct:int4-spinquant-eo8 │ 3.69 GB │ 2025-02-26 11:37:41 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama3.2-3B │ 5.99 GB │ 2025-02-18 21:51:26 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama3.1-8B │ 14.97 GB │ 2025-02-16 10:36:37 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama3.2-1B-Instruct:int4-spinquant-eo8 │ 1.51 GB │ 2025-02-26 11:35:02 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama-Guard-3-1B │ 2.80 GB │ 2025-02-26 11:20:46 │ -├─────────────────────────────────────────┼──────────┼─────────────────────┤ -│ Llama-Guard-3-1B:int4 │ 0.43 GB │ 2025-02-26 11:33:33 │ -└─────────────────────────────────────────┴──────────┴─────────────────────┘ -``` - -## Running the Distribution - -You can do this via Conda (build code) or Docker which has a pre-built image. - -### Via Docker - -This method allows you to get started quickly without having to build the distribution code. - -```bash -LLAMA_STACK_PORT=8321 -docker run \ - -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - llamastack/distribution-meta-reference-quantized-gpu \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct -``` - -If you are using Llama Stack Safety / Shield APIs, use: - -```bash -docker run \ - -it \ - --pull always \ - -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ - -v ~/.llama:/root/.llama \ - llamastack/distribution-meta-reference-quantized-gpu \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \ - --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B -``` - -### Via Conda - -Make sure you have done `uv pip install llama-stack` and have the Llama Stack CLI available. 
- -```bash -llama stack build --template meta-reference-quantized-gpu --image-type conda -llama stack run distributions/meta-reference-quantized-gpu/run.yaml \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct -``` - -If you are using Llama Stack Safety / Shield APIs, use: - -```bash -llama stack run distributions/meta-reference-quantized-gpu/run-with-safety.yaml \ - --port $LLAMA_STACK_PORT \ - --env INFERENCE_MODEL=meta-llama/Llama-3.2-3B-Instruct \ - --env SAFETY_MODEL=meta-llama/Llama-Guard-3-1B -``` diff --git a/llama_stack/templates/meta-reference-gpu/doc_template.md b/llama_stack/templates/meta-reference-gpu/doc_template.md index a174331b4..2ca6793d7 100644 --- a/llama_stack/templates/meta-reference-gpu/doc_template.md +++ b/llama_stack/templates/meta-reference-gpu/doc_template.md @@ -69,6 +69,7 @@ LLAMA_STACK_PORT=8321 docker run \ -it \ --pull always \ + --gpu all \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -v ~/.llama:/root/.llama \ llamastack/distribution-{{ name }} \ @@ -82,6 +83,7 @@ If you are using Llama Stack Safety / Shield APIs, use: docker run \ -it \ --pull always \ + --gpu all \ -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \ -v ~/.llama:/root/.llama \ llamastack/distribution-{{ name }} \ From 4fb583b4076e245cbd6c9c76546d485652f78563 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Fri, 25 Apr 2025 12:23:33 -0700 Subject: [PATCH 3/8] fix: check that llama stack client plain can be used as a subst for OpenAI client (#2032) With https://github.com/meta-llama/llama-stack-client-python/pull/226, now we have llama-stack-client be able to used as a substitute for OpenAI client (duck-typed) so you don't need to change downstream library code. image --- .../inference/test_openai_completion.py | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/tests/integration/inference/test_openai_completion.py b/tests/integration/inference/test_openai_completion.py index 75b53100c..46ec03d2e 100644 --- a/tests/integration/inference/test_openai_completion.py +++ b/tests/integration/inference/test_openai_completion.py @@ -75,19 +75,24 @@ def openai_client(client_with_models): return OpenAI(base_url=base_url, api_key="bar") +@pytest.fixture(params=["openai_client", "llama_stack_client"]) +def compat_client(request): + return request.getfixturevalue(request.param) + + @pytest.mark.parametrize( "test_case", [ "inference:completion:sanity", ], ) -def test_openai_completion_non_streaming(openai_client, client_with_models, text_model_id, test_case): +def test_openai_completion_non_streaming(llama_stack_client, client_with_models, text_model_id, test_case): skip_if_model_doesnt_support_openai_completion(client_with_models, text_model_id) tc = TestCase(test_case) # ollama needs more verbose prompting for some reason here... prompt = "Respond to this question and explain your answer. 
" + tc["content"] - response = openai_client.completions.create( + response = llama_stack_client.completions.create( model=text_model_id, prompt=prompt, stream=False, @@ -103,13 +108,13 @@ def test_openai_completion_non_streaming(openai_client, client_with_models, text "inference:completion:sanity", ], ) -def test_openai_completion_streaming(openai_client, client_with_models, text_model_id, test_case): +def test_openai_completion_streaming(llama_stack_client, client_with_models, text_model_id, test_case): skip_if_model_doesnt_support_openai_completion(client_with_models, text_model_id) tc = TestCase(test_case) # ollama needs more verbose prompting for some reason here... prompt = "Respond to this question and explain your answer. " + tc["content"] - response = openai_client.completions.create( + response = llama_stack_client.completions.create( model=text_model_id, prompt=prompt, stream=True, @@ -127,11 +132,11 @@ def test_openai_completion_streaming(openai_client, client_with_models, text_mod 0, ], ) -def test_openai_completion_prompt_logprobs(openai_client, client_with_models, text_model_id, prompt_logprobs): +def test_openai_completion_prompt_logprobs(llama_stack_client, client_with_models, text_model_id, prompt_logprobs): skip_if_provider_isnt_vllm(client_with_models, text_model_id) prompt = "Hello, world!" - response = openai_client.completions.create( + response = llama_stack_client.completions.create( model=text_model_id, prompt=prompt, stream=False, @@ -144,11 +149,11 @@ def test_openai_completion_prompt_logprobs(openai_client, client_with_models, te assert len(choice.prompt_logprobs) > 0 -def test_openai_completion_guided_choice(openai_client, client_with_models, text_model_id): +def test_openai_completion_guided_choice(llama_stack_client, client_with_models, text_model_id): skip_if_provider_isnt_vllm(client_with_models, text_model_id) prompt = "I am feeling really sad today." 
- response = openai_client.completions.create( + response = llama_stack_client.completions.create( model=text_model_id, prompt=prompt, stream=False, @@ -161,6 +166,9 @@ def test_openai_completion_guided_choice(openai_client, client_with_models, text assert choice.text in ["joy", "sadness"] +# Run the chat-completion tests with both the OpenAI client and the LlamaStack client + + @pytest.mark.parametrize( "test_case", [ @@ -168,13 +176,13 @@ def test_openai_completion_guided_choice(openai_client, client_with_models, text "inference:chat_completion:non_streaming_02", ], ) -def test_openai_chat_completion_non_streaming(openai_client, client_with_models, text_model_id, test_case): +def test_openai_chat_completion_non_streaming(compat_client, client_with_models, text_model_id, test_case): skip_if_model_doesnt_support_openai_chat_completion(client_with_models, text_model_id) tc = TestCase(test_case) question = tc["question"] expected = tc["expected"] - response = openai_client.chat.completions.create( + response = compat_client.chat.completions.create( model=text_model_id, messages=[ { @@ -196,13 +204,13 @@ def test_openai_chat_completion_non_streaming(openai_client, client_with_models, "inference:chat_completion:streaming_02", ], ) -def test_openai_chat_completion_streaming(openai_client, client_with_models, text_model_id, test_case): +def test_openai_chat_completion_streaming(compat_client, client_with_models, text_model_id, test_case): skip_if_model_doesnt_support_openai_chat_completion(client_with_models, text_model_id) tc = TestCase(test_case) question = tc["question"] expected = tc["expected"] - response = openai_client.chat.completions.create( + response = compat_client.chat.completions.create( model=text_model_id, messages=[{"role": "user", "content": question}], stream=True, From 1b2e116a2ad8f6e8661b951fdbd7d9bf9ec19994 Mon Sep 17 00:00:00 2001 From: ehhuang Date: Fri, 25 Apr 2025 13:16:16 -0700 Subject: [PATCH 4/8] fix: tool call encoded twice (#2034) # What does this PR do? 
## Test Plan LLAMA_STACK_CONFIG=http://localhost:5002 pytest -s -v tests/integration/inference --safety-shield meta-llama/Llama-Guard-3-8B --vision-model meta-llama/Llama-4-Scout-17B-16E-Instruct --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct --- llama_stack/models/llama/llama4/chat_format.py | 1 + 1 file changed, 1 insertion(+) diff --git a/llama_stack/models/llama/llama4/chat_format.py b/llama_stack/models/llama/llama4/chat_format.py index 1debadcc5..1574eeb5e 100644 --- a/llama_stack/models/llama/llama4/chat_format.py +++ b/llama_stack/models/llama/llama4/chat_format.py @@ -303,6 +303,7 @@ class ChatFormat: arguments_json=json.dumps(tool_arguments), ) ) + content = "" return RawMessage( role="assistant", From b5d8e44e81b2ff09c276308ea4fca8ed5dc94fb5 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Fri, 25 Apr 2025 13:15:52 -0700 Subject: [PATCH 5/8] fix: only sleep for tests when they pass or fail --- tests/integration/conftest.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 22290b519..131219e52 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -10,6 +10,7 @@ import platform import textwrap import time +import pytest from dotenv import load_dotenv from llama_stack.log import get_logger @@ -19,10 +20,29 @@ from .report import Report logger = get_logger(__name__, category="tests") +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item, call): + outcome = yield + report = outcome.get_result() + if report.when == "call": + item.execution_outcome = report.outcome + item.was_xfail = getattr(report, "wasxfail", False) + + def pytest_runtest_teardown(item): - interval_seconds = os.getenv("LLAMA_STACK_TEST_INTERVAL_SECONDS") - if interval_seconds: - time.sleep(float(interval_seconds)) + # Check if the test actually ran and passed or failed, but was not skipped or an expected failure (xfail) + outcome = getattr(item, "execution_outcome", None) + was_xfail = getattr(item, "was_xfail", False) + + name = item.nodeid + if not any(x in name for x in ("inference/", "safety/", "agents/")): + return + + logger.debug(f"Test '{item.nodeid}' outcome was '{outcome}' (xfail={was_xfail})") + if outcome in ("passed", "failed") and not was_xfail: + interval_seconds = os.getenv("LLAMA_STACK_TEST_INTERVAL_SECONDS") + if interval_seconds: + time.sleep(float(interval_seconds)) def pytest_configure(config): From 8713d67ce3cf383bd615934dedd1da99ff2c905c Mon Sep 17 00:00:00 2001 From: Jash Gulabrai <37194352+JashG@users.noreply.github.com> Date: Fri, 25 Apr 2025 16:21:50 -0400 Subject: [PATCH 6/8] fix: Correctly parse algorithm_config when launching NVIDIA customization job; fix internal request handler (#2025) # What does this PR do? This addresses 2 bugs I ran into when launching a fine-tuning job with the NVIDIA Adapter: 1. Session handling in `_make_request` helper function returns an error. 
``` INFO: 127.0.0.1:55831 - "POST /v1/post-training/supervised-fine-tune HTTP/1.1" 500 Internal Server Error 16:11:45.643 [END] /v1/post-training/supervised-fine-tune [StatusCode.OK] (270.44ms) 16:11:45.643 [ERROR] Error executing endpoint route='/v1/post-training/supervised-fine-tune' method='post' Traceback (most recent call last): File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/distribution/server/server.py", line 201, in endpoint return await maybe_await(value) File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/distribution/server/server.py", line 161, in maybe_await return await value File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/providers/remote/post_training/nvidia/post_training.py", line 408, in supervised_fine_tune response = await self._make_request( File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/providers/remote/post_training/nvidia/post_training.py", line 98, in _make_request async with self.session.request(method, url, params=params, json=json, **kwargs) as response: File "/Users/jgulabrai/Projects/forks/llama-stack/.venv/lib/python3.10/site-packages/aiohttp/client.py", line 1425, in __aenter__ self._resp: _RetType = await self._coro File "/Users/jgulabrai/Projects/forks/llama-stack/.venv/lib/python3.10/site-packages/aiohttp/client.py", line 579, in _request handle = tm.start() File "/Users/jgulabrai/Projects/forks/llama-stack/.venv/lib/python3.10/site-packages/aiohttp/helpers.py", line 587, in start return self._loop.call_at(when, self.__call__) File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/base_events.py", line 724, in call_at self._check_closed() File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/asyncio/base_events.py", line 510, in _check_closed raise RuntimeError('Event loop is closed') RuntimeError: Event loop is closed ``` Note: This only occurred when initializing the client like so: ``` client = LlamaStackClient( base_url="http://0.0.0.0:8321" ) response = client.post_training.supervised_fine_tune(...) # Returns error ``` I didn't run into this issue when using the library client: ``` client = LlamaStackAsLibraryClient("nvidia") client.initialize() response = client.post_training.supervised_fine_tune(...) # Works fine ``` 2. The `algorithm_config` param in `supervised_fine_tune` is parsed as a `dict` when run from unit tests, but a Pydantic model when invoked using the Llama Stack client. 
So, the call fails outside of unit tests: ``` INFO: 127.0.0.1:54024 - "POST /v1/post-training/supervised-fine-tune HTTP/1.1" 500 Internal Server Error 21:14:02.315 [END] /v1/post-training/supervised-fine-tune [StatusCode.OK] (71.18ms) 21:14:02.314 [ERROR] Error executing endpoint route='/v1/post-training/supervised-fine-tune' method='post' Traceback (most recent call last): File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/distribution/server/server.py", line 205, in endpoint return await maybe_await(value) File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/distribution/server/server.py", line 164, in maybe_await return await value File "/Users/jgulabrai/Projects/forks/llama-stack/llama_stack/providers/remote/post_training/nvidia/post_training.py", line 407, in supervised_fine_tune "adapter_dim": algorithm_config.get("adapter_dim"), File "/Users/jgulabrai/Projects/forks/llama-stack/.venv/lib/python3.10/site-packages/pydantic/main.py", line 891, in __getattr__ raise AttributeError(f'{type(self).__name__!r} object has no attribute {item!r}') AttributeError: 'LoraFinetuningConfig' object has no attribute 'get' ``` The code assumes `algorithm_config` should be `dict`, so I just handle both cases. [//]: # (If resolving an issue, uncomment and update the line below) [//]: # (Closes #[issue-number]) ## Test Plan 1. I ran a local Llama Stack server with the necessary env vars: ``` lama stack run llama_stack/templates/nvidia/run.yaml --port 8321 --env ... ``` And invoked `supervised_fine_tune` to confirm neither of the errors above occur. ``` client = LlamaStackClient( base_url="http://0.0.0.0:8321" ) response = client.post_training.supervised_fine_tune(...) ``` 2. I confirmed the unit tests still pass: `./scripts/unit-tests.sh tests/unit/providers/nvidia/test_supervised_fine_tuning.py` [//]: # (## Documentation) --------- Co-authored-by: Jash Gulabrai --- .../post_training/nvidia/post_training.py | 37 +++++------ .../unit/providers/nvidia/test_parameters.py | 65 ++++++++++--------- .../nvidia/test_supervised_fine_tuning.py | 47 +++++++++----- 3 files changed, 83 insertions(+), 66 deletions(-) diff --git a/llama_stack/providers/remote/post_training/nvidia/post_training.py b/llama_stack/providers/remote/post_training/nvidia/post_training.py index d3de930f7..c74fb2a24 100644 --- a/llama_stack/providers/remote/post_training/nvidia/post_training.py +++ b/llama_stack/providers/remote/post_training/nvidia/post_training.py @@ -67,13 +67,18 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): self.timeout = aiohttp.ClientTimeout(total=config.timeout) # TODO: filter by available models based on /config endpoint ModelRegistryHelper.__init__(self, model_entries=_MODEL_ENTRIES) - self.session = aiohttp.ClientSession(headers=self.headers, timeout=self.timeout) - self.customizer_url = config.customizer_url + self.session = None + self.customizer_url = config.customizer_url if not self.customizer_url: warnings.warn("Customizer URL is not set, using default value: http://nemo.test", stacklevel=2) self.customizer_url = "http://nemo.test" + async def _get_session(self) -> aiohttp.ClientSession: + if self.session is None or self.session.closed: + self.session = aiohttp.ClientSession(headers=self.headers, timeout=self.timeout) + return self.session + async def _make_request( self, method: str, @@ -94,8 +99,9 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): if json and "Content-Type" not in request_headers: request_headers["Content-Type"] = "application/json" + session = await 
self._get_session() for _ in range(self.config.max_retries): - async with self.session.request(method, url, params=params, json=json, **kwargs) as response: + async with session.request(method, url, params=params, json=json, **kwargs) as response: if response.status >= 400: error_data = await response.json() raise Exception(f"API request failed: {error_data}") @@ -122,8 +128,8 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): jobs = [] for job in response.get("data", []): job_id = job.pop("id") - job_status = job.pop("status", "unknown").lower() - mapped_status = STATUS_MAPPING.get(job_status, "unknown") + job_status = job.pop("status", "scheduled").lower() + mapped_status = STATUS_MAPPING.get(job_status, "scheduled") # Convert string timestamps to datetime objects created_at = ( @@ -177,7 +183,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): ) api_status = response.pop("status").lower() - mapped_status = STATUS_MAPPING.get(api_status, "unknown") + mapped_status = STATUS_MAPPING.get(api_status, "scheduled") return NvidiaPostTrainingJobStatusResponse( status=JobStatus(mapped_status), @@ -239,6 +245,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): Supported models: - meta/llama-3.1-8b-instruct + - meta/llama-3.2-1b-instruct Supported algorithm configs: - LoRA, SFT @@ -284,10 +291,6 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): - LoRA config: ## NeMo customizer specific LoRA parameters - - adapter_dim: int - Adapter dimension - Default: 8 (supports powers of 2) - - adapter_dropout: float - Adapter dropout - Default: None (0.0-1.0) - alpha: int - Scaling factor for the LoRA update Default: 16 Note: @@ -297,7 +300,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): User is informed about unsupported parameters via warnings. 
""" # Map model to nvidia model name - # ToDo: only supports llama-3.1-8b-instruct now, need to update this to support other models + # See `_MODEL_ENTRIES` for supported models nvidia_model = self.get_provider_model_id(model) # Check for unsupported method parameters @@ -330,7 +333,7 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): }, "data_config": {"dataset_id", "batch_size"}, "optimizer_config": {"lr", "weight_decay"}, - "lora_config": {"type", "adapter_dim", "adapter_dropout", "alpha"}, + "lora_config": {"type", "alpha"}, } # Validate all parameters at once @@ -389,16 +392,10 @@ class NvidiaPostTrainingAdapter(ModelRegistryHelper): # Handle LoRA-specific configuration if algorithm_config: - if isinstance(algorithm_config, dict) and algorithm_config.get("type") == "LoRA": + if algorithm_config.type == "LoRA": warn_unsupported_params(algorithm_config, supported_params["lora_config"], "LoRA config") job_config["hyperparameters"]["lora"] = { - k: v - for k, v in { - "adapter_dim": algorithm_config.get("adapter_dim"), - "alpha": algorithm_config.get("alpha"), - "adapter_dropout": algorithm_config.get("adapter_dropout"), - }.items() - if v is not None + k: v for k, v in {"alpha": algorithm_config.alpha}.items() if v is not None } else: raise NotImplementedError(f"Unsupported algorithm config: {algorithm_config}") diff --git a/tests/unit/providers/nvidia/test_parameters.py b/tests/unit/providers/nvidia/test_parameters.py index cb1b92fba..ea12122a0 100644 --- a/tests/unit/providers/nvidia/test_parameters.py +++ b/tests/unit/providers/nvidia/test_parameters.py @@ -10,14 +10,17 @@ import warnings from unittest.mock import patch import pytest -from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig -from llama_stack_client.types.post_training_supervised_fine_tune_params import ( - TrainingConfig, - TrainingConfigDataConfig, - TrainingConfigEfficiencyConfig, - TrainingConfigOptimizerConfig, -) +from llama_stack.apis.post_training.post_training import ( + DataConfig, + DatasetFormat, + EfficiencyConfig, + LoraFinetuningConfig, + OptimizerConfig, + OptimizerType, + TrainingConfig, +) +from llama_stack.distribution.library_client import convert_pydantic_to_json_value from llama_stack.providers.remote.post_training.nvidia.post_training import ( NvidiaPostTrainingAdapter, NvidiaPostTrainingConfig, @@ -66,11 +69,8 @@ class TestNvidiaParameters(unittest.TestCase): def test_customizer_parameters_passed(self): """Test scenario 1: When an optional parameter is passed and value is correctly set.""" - custom_adapter_dim = 32 # Different from default of 8 algorithm_config = LoraFinetuningConfig( type="LoRA", - adapter_dim=custom_adapter_dim, - adapter_dropout=0.2, apply_lora_to_mlp=True, apply_lora_to_output=True, alpha=16, @@ -78,8 +78,15 @@ class TestNvidiaParameters(unittest.TestCase): lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"], ) - data_config = TrainingConfigDataConfig(dataset_id="test-dataset", batch_size=16) - optimizer_config = TrainingConfigOptimizerConfig(lr=0.0002) + data_config = DataConfig( + dataset_id="test-dataset", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct + ) + optimizer_config = OptimizerConfig( + optimizer_type=OptimizerType.adam, + lr=0.0002, + weight_decay=0.01, + num_warmup_steps=100, + ) training_config = TrainingConfig( n_epochs=3, data_config=data_config, @@ -95,7 +102,7 @@ class TestNvidiaParameters(unittest.TestCase): model="meta-llama/Llama-3.1-8B-Instruct", checkpoint_dir="", 
algorithm_config=algorithm_config, - training_config=training_config, + training_config=convert_pydantic_to_json_value(training_config), logger_config={}, hyperparam_search_config={}, ) @@ -114,7 +121,7 @@ class TestNvidiaParameters(unittest.TestCase): self._assert_request_params( { "hyperparameters": { - "lora": {"adapter_dim": custom_adapter_dim, "adapter_dropout": 0.2, "alpha": 16}, + "lora": {"alpha": 16}, "epochs": 3, "learning_rate": 0.0002, "batch_size": 16, @@ -130,8 +137,6 @@ class TestNvidiaParameters(unittest.TestCase): algorithm_config = LoraFinetuningConfig( type="LoRA", - adapter_dim=16, - adapter_dropout=0.1, apply_lora_to_mlp=True, apply_lora_to_output=True, alpha=16, @@ -139,12 +144,16 @@ class TestNvidiaParameters(unittest.TestCase): lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"], ) - data_config = TrainingConfigDataConfig( - dataset_id=required_dataset_id, # Required parameter - batch_size=8, + data_config = DataConfig( + dataset_id=required_dataset_id, batch_size=8, shuffle=False, data_format=DatasetFormat.instruct ) - optimizer_config = TrainingConfigOptimizerConfig(lr=0.0001) + optimizer_config = OptimizerConfig( + optimizer_type=OptimizerType.adam, + lr=0.0001, + weight_decay=0.01, + num_warmup_steps=100, + ) training_config = TrainingConfig( n_epochs=1, @@ -161,7 +170,7 @@ class TestNvidiaParameters(unittest.TestCase): model=required_model, # Required parameter checkpoint_dir="", algorithm_config=algorithm_config, - training_config=training_config, + training_config=convert_pydantic_to_json_value(training_config), logger_config={}, hyperparam_search_config={}, ) @@ -186,24 +195,24 @@ class TestNvidiaParameters(unittest.TestCase): def test_unsupported_parameters_warning(self): """Test that warnings are raised for unsupported parameters.""" - data_config = TrainingConfigDataConfig( + data_config = DataConfig( dataset_id="test-dataset", batch_size=8, # Unsupported parameters shuffle=True, - data_format="instruct", + data_format=DatasetFormat.instruct, validation_dataset_id="val-dataset", ) - optimizer_config = TrainingConfigOptimizerConfig( + optimizer_config = OptimizerConfig( lr=0.0001, weight_decay=0.01, # Unsupported parameters - optimizer_type="adam", + optimizer_type=OptimizerType.adam, num_warmup_steps=100, ) - efficiency_config = TrainingConfigEfficiencyConfig( + efficiency_config = EfficiencyConfig( enable_activation_checkpointing=True # Unsupported parameter ) @@ -230,15 +239,13 @@ class TestNvidiaParameters(unittest.TestCase): checkpoint_dir="test-dir", # Unsupported parameter algorithm_config=LoraFinetuningConfig( type="LoRA", - adapter_dim=16, - adapter_dropout=0.1, apply_lora_to_mlp=True, apply_lora_to_output=True, alpha=16, rank=16, lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"], ), - training_config=training_config, + training_config=convert_pydantic_to_json_value(training_config), logger_config={"test": "value"}, # Unsupported parameter hyperparam_search_config={"test": "value"}, # Unsupported parameter ) diff --git a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py index 09f67e4e6..319011be3 100644 --- a/tests/unit/providers/nvidia/test_supervised_fine_tuning.py +++ b/tests/unit/providers/nvidia/test_supervised_fine_tuning.py @@ -10,14 +10,18 @@ import warnings from unittest.mock import patch import pytest -from llama_stack_client.types.algorithm_config_param import LoraFinetuningConfig, QatFinetuningConfig -from 
llama_stack_client.types.post_training_supervised_fine_tune_params import ( - TrainingConfig, - TrainingConfigDataConfig, - TrainingConfigOptimizerConfig, -) from llama_stack.apis.models import Model, ModelType +from llama_stack.apis.post_training.post_training import ( + DataConfig, + DatasetFormat, + LoraFinetuningConfig, + OptimizerConfig, + OptimizerType, + QATFinetuningConfig, + TrainingConfig, +) +from llama_stack.distribution.library_client import convert_pydantic_to_json_value from llama_stack.providers.remote.inference.nvidia.nvidia import NVIDIAConfig, NVIDIAInferenceAdapter from llama_stack.providers.remote.post_training.nvidia.post_training import ( ListNvidiaPostTrainingJobs, @@ -121,7 +125,7 @@ class TestNvidiaPostTraining(unittest.TestCase): "batch_size": 16, "epochs": 2, "learning_rate": 0.0001, - "lora": {"adapter_dim": 16, "adapter_dropout": 0.1}, + "lora": {"alpha": 16}, }, "output_model": "default/job-1234", "status": "created", @@ -132,8 +136,6 @@ class TestNvidiaPostTraining(unittest.TestCase): algorithm_config = LoraFinetuningConfig( type="LoRA", - adapter_dim=16, - adapter_dropout=0.1, apply_lora_to_mlp=True, apply_lora_to_output=True, alpha=16, @@ -141,10 +143,15 @@ class TestNvidiaPostTraining(unittest.TestCase): lora_attn_modules=["q_proj", "k_proj", "v_proj", "o_proj"], ) - data_config = TrainingConfigDataConfig(dataset_id="sample-basic-test", batch_size=16) + data_config = DataConfig( + dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct + ) - optimizer_config = TrainingConfigOptimizerConfig( + optimizer_config = OptimizerConfig( + optimizer_type=OptimizerType.adam, lr=0.0001, + weight_decay=0.01, + num_warmup_steps=100, ) training_config = TrainingConfig( @@ -161,7 +168,7 @@ class TestNvidiaPostTraining(unittest.TestCase): model="meta-llama/Llama-3.1-8B-Instruct", checkpoint_dir="", algorithm_config=algorithm_config, - training_config=training_config, + training_config=convert_pydantic_to_json_value(training_config), logger_config={}, hyperparam_search_config={}, ) @@ -185,16 +192,22 @@ class TestNvidiaPostTraining(unittest.TestCase): "epochs": 2, "batch_size": 16, "learning_rate": 0.0001, - "lora": {"alpha": 16, "adapter_dim": 16, "adapter_dropout": 0.1}, + "weight_decay": 0.01, + "lora": {"alpha": 16}, }, }, ) def test_supervised_fine_tune_with_qat(self): - algorithm_config = QatFinetuningConfig(type="QAT", quantizer_name="quantizer_name", group_size=1) - data_config = TrainingConfigDataConfig(dataset_id="sample-basic-test", batch_size=16) - optimizer_config = TrainingConfigOptimizerConfig( + algorithm_config = QATFinetuningConfig(type="QAT", quantizer_name="quantizer_name", group_size=1) + data_config = DataConfig( + dataset_id="sample-basic-test", batch_size=16, shuffle=False, data_format=DatasetFormat.instruct + ) + optimizer_config = OptimizerConfig( + optimizer_type=OptimizerType.adam, lr=0.0001, + weight_decay=0.01, + num_warmup_steps=100, ) training_config = TrainingConfig( n_epochs=2, @@ -209,7 +222,7 @@ class TestNvidiaPostTraining(unittest.TestCase): model="meta-llama/Llama-3.1-8B-Instruct", checkpoint_dir="", algorithm_config=algorithm_config, - training_config=training_config, + training_config=convert_pydantic_to_json_value(training_config), logger_config={}, hyperparam_search_config={}, ) From bb1a85c9a0b34f5ecafacf4465dcad62737d0cb2 Mon Sep 17 00:00:00 2001 From: Ashwin Bharambe Date: Fri, 25 Apr 2025 15:23:53 -0700 Subject: [PATCH 7/8] fix: make sure test works equally well against llama stack as a 
server --- tests/integration/tool_runtime/test_registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/tool_runtime/test_registration.py b/tests/integration/tool_runtime/test_registration.py index e4241d813..b36237d05 100644 --- a/tests/integration/tool_runtime/test_registration.py +++ b/tests/integration/tool_runtime/test_registration.py @@ -114,7 +114,7 @@ def test_register_and_unregister_toolgroup(llama_stack_client, mcp_server): llama_stack_client.toolgroups.unregister(toolgroup_id=test_toolgroup_id) # Verify it is unregistered - with pytest.raises(ValueError, match=f"Tool group '{test_toolgroup_id}' not found"): + with pytest.raises(Exception, match=f"Tool group '{test_toolgroup_id}' not found"): llama_stack_client.toolgroups.get(toolgroup_id=test_toolgroup_id) # Verify tools are also unregistered From 0266b20535c6d3a7e2918161c8e0a7804cc08d44 Mon Sep 17 00:00:00 2001 From: ehhuang Date: Fri, 25 Apr 2025 15:52:15 -0700 Subject: [PATCH 8/8] docs: update prompt_format.md for llama4 (#2035) torchrun --nproc_per_node=8 scripts/generate_prompt_format.py meta-llama/Llama-4-Scout-17B-16E-Instruct ~/local/checkpoints// llama_stack.models.llama.llama4.prompts llama_stack/models/llama/llama4/prompt_format.md Co-authored-by: Eric Huang --- .../models/llama/llama4/prompt_format.md | 62 ++++++++++++++----- llama_stack/models/llama/llama4/prompts.py | 40 +++--------- 2 files changed, 54 insertions(+), 48 deletions(-) diff --git a/llama_stack/models/llama/llama4/prompt_format.md b/llama_stack/models/llama/llama4/prompt_format.md index 698571093..350a5517a 100644 --- a/llama_stack/models/llama/llama4/prompt_format.md +++ b/llama_stack/models/llama/llama4/prompt_format.md @@ -64,7 +64,7 @@ This example passes an image that is smaller than the tile size, to show the til ##### Model Response Format ``` -The image depicts a dog standing on a skateboard, with its front paws positioned on the board and its back paws hanging off the back. The dog has a distinctive coat pattern, featuring a white face, brown and black fur, and white paws, and is standing on a skateboard with red wheels, set against a blurred background of a street or alleyway with a teal door and beige wall.<|eot|> +The image depicts a dog standing on a skateboard, positioned centrally and facing the camera directly. The dog has a distinctive coat pattern featuring white, black, and brown fur, with floppy ears and a black nose, and is standing on a skateboard with red wheels.<|eot|> ``` @@ -91,7 +91,7 @@ Here is an example of how to pass an image to the model ##### Model Response Format ``` -This image shows a dog standing on a skateboard, with its front paws positioned near the front of the board and its back paws near the back. The dog has a white, black, and orange coat, and is standing on a gray skateboard with red wheels, in front of a blurred background that appears to be a street or alleyway.<|eot|> +The image depicts a dog standing on a skateboard, with the dog positioned centrally and facing forward. The dog has a distinctive coat featuring a mix of white, brown, and black fur, and is wearing a collar as it stands on the skateboard, which has red wheels.<|eot|> ``` @@ -117,7 +117,7 @@ Here is an example of how to pass an image to the model ##### Model Response Format ``` -The first image shows a dog standing on a skateboard, while the second image shows a plate of spaghetti with tomato sauce, parmesan cheese, and parsley. 
The two images are unrelated, with the first image featuring a dog and the second image featuring a food dish, and they do not share any common elements or themes.<|eot|> +The first image features a dog standing on a skateboard, while the second image showcases a plate of spaghetti with tomato sauce and cheese. The two images appear to be unrelated, with one depicting a playful scene of a dog on a skateboard and the other presenting a classic Italian dish.<|eom|> ``` @@ -135,13 +135,44 @@ We are continuing the format for zero shot function calling used in previous ver ``` <|begin_of_text|><|header_start|>system<|header_end|> -You are an expert in composing functions. You are given a question and a set of possible functions. -Based on the question, you will need to make one or more function/tool calls to achieve the purpose. -If none of the function can be used, point it out. If the given question lacks the parameters required by the function, -also point it out. You should only return the function call in tools call sections. +You are a helpful assistant and an expert in function composition. You can answer general questions using your internal knowledge OR invoke functions when necessary. Follow these strict guidelines: + +1. FUNCTION CALLS: +- ONLY use functions that are EXPLICITLY listed in the function list below +- If NO functions are listed (empty function list []), respond ONLY with internal knowledge or "I don't have access to [Unavailable service] information" +- If a function is not in the list, respond ONLY with internal knowledge or "I don't have access to [Unavailable service] information" +- If ALL required parameters are present AND the query EXACTLY matches a listed function's purpose: output ONLY the function call(s) +- Use exact format: [func_name1(param1=value1, param2=value2), func_name2(...)] +Examples: +CORRECT: [get_weather(location="Vancouver"), calculate_route(start="Boston", end="New York")] <- Only if get_weather and calculate_route are in function list +INCORRECT: get_weather(location="New York") +INCORRECT: Let me check the weather: [get_weather(location="New York")] +INCORRECT: [get_events(location="Singapore")] <- If function not in list + +2. RESPONSE RULES: +- For pure function requests matching a listed function: ONLY output the function call(s) +- For knowledge questions: ONLY output text +- For missing parameters: ONLY request the specific missing parameters +- For unavailable services (not in function list): output ONLY with internal knowledge or "I don't have access to [Unavailable service] information". Do NOT execute a function call. +- If the query asks for information beyond what a listed function provides: output ONLY with internal knowledge about your limitations +- NEVER combine text and function calls in the same response +- NEVER suggest alternative functions when the requested service is unavailable +- NEVER create or invent new functions not listed below + +3. STRICT BOUNDARIES: +- ONLY use functions from the list below - no exceptions +- NEVER use a function as an alternative to unavailable information +- NEVER call functions not present in the function list +- NEVER add explanatory text to function calls +- NEVER respond with empty brackets +- Use proper Python/JSON syntax for function calls +- Check the function list carefully before responding + +4. 
TOOL RESPONSE HANDLING: +- When receiving tool responses: provide concise, natural language responses +- Don't repeat tool response verbatim +- Don't add supplementary information -If you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)] -You SHOULD NOT include any other text in the response. Here is a list of functions in JSON format that you can invoke. @@ -151,9 +182,7 @@ Here is a list of functions in JSON format that you can invoke. "description": "Get weather info for places", "parameters": { "type": "dict", - "required": [ - "city" - ], + "required": ["city"], "properties": { "city": { "type": "string", @@ -167,7 +196,10 @@ Here is a list of functions in JSON format that you can invoke. } } } -<|eot|><|header_start|>user<|header_end|> +] + +You can answer general questions or invoke tools when necessary. +In addition to tool calls, you should also augment your responses by using the tool outputs.<|eot|><|header_start|>user<|header_end|> What is the weather in SF and Seattle?<|eot|><|header_start|>assistant<|header_end|> @@ -176,7 +208,7 @@ What is the weather in SF and Seattle?<|eot|><|header_start|>assistant<|header_e ##### Model Response Format ``` -[get_weather(city='SF'), get_weather(city='Seattle')]<|eot|> +[get_weather(city="San Francisco"), get_weather(city="Seattle")]<|eot|> ``` @@ -273,5 +305,5 @@ Use tools to get latest trending songs<|eot|><|header_start|>assistant<|header_e ##### Model Response Format ``` -{"n": "10"}<|eot|> +{"n": 10}<|eot|> ``` diff --git a/llama_stack/models/llama/llama4/prompts.py b/llama_stack/models/llama/llama4/prompts.py index 13b96359a..fe9a59130 100644 --- a/llama_stack/models/llama/llama4/prompts.py +++ b/llama_stack/models/llama/llama4/prompts.py @@ -9,6 +9,10 @@ from io import BytesIO from pathlib import Path from typing import List +from llama_stack.models.llama.llama4.prompt_templates.system_prompts import ( + PythonListCustomToolGenerator, +) + from ..datatypes import RawMediaItem, RawMessage, RawTextItem from ..prompt_format import ( Llama4UseCase, @@ -177,39 +181,9 @@ def usecases(base_model: bool = False) -> List[UseCase | str]: [ RawMessage( role="system", - content="""You are an expert in composing functions. You are given a question and a set of possible functions. -Based on the question, you will need to make one or more function/tool calls to achieve the purpose. -If none of the function can be used, point it out. If the given question lacks the parameters required by the function, -also point it out. You should only return the function call in tools call sections. - -If you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)] -You SHOULD NOT include any other text in the response. - -Here is a list of functions in JSON format that you can invoke. - -[ - { - "name": "get_weather", - "description": "Get weather info for places", - "parameters": { - "type": "dict", - "required": [ - "city" - ], - "properties": { - "city": { - "type": "string", - "description": "The name of the city to get the weather for" - }, - "metric": { - "type": "string", - "description": "The metric for weather. Options are: celsius, fahrenheit", - "default": "celsius" - } - } - } - } -""", + content=PythonListCustomToolGenerator() + .gen(PythonListCustomToolGenerator().data_examples()[0]) + .render(), ), RawMessage( role="user",