From 53f29eb8b96be078db69dd0e278fd61700076462 Mon Sep 17 00:00:00 2001
From: Suraj Subramanian
Date: Mon, 7 Apr 2025 11:31:02 -0700
Subject: [PATCH] appease the lint gods

---
 .../models/llama/llama4/prompt_format.md   |  6 ++----
 llama_stack/models/llama/llama4/prompts.py | 24 ++++++------------------
 2 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/llama_stack/models/llama/llama4/prompt_format.md b/llama_stack/models/llama/llama4/prompt_format.md
index 6774b720d..c98b773a1 100644
--- a/llama_stack/models/llama/llama4/prompt_format.md
+++ b/llama_stack/models/llama/llama4/prompt_format.md
@@ -76,7 +76,7 @@ Notice the structure of the image section:
 <|image_start|><|image|><|patch|>...<|patch|><|image_end|>
 ```
 This is due to the image being smaller than the tile size.
- 
+

 ## Single image prompt format
 
@@ -102,7 +102,7 @@ With a bigger image, the image will include the tile separator tokens. Additiona
 ```
 <|image_start|><|patch|>...<|patch|><|tile_x_separator|><|patch|>...<|patch|><|tile_y_separator|><|patch|>...<|patch|><|image|><|patch|>...<|patch|><|image_end|>
 ```
- 
+

 ## Multiple images prompt format
 
@@ -321,5 +321,3 @@ The top 2 latest trending songs are:
 
 - Tool outputs should be passed back to the model in the `tool` (a.k.a. `ipython`) role.
 - The model parses the tool output contents until it encounters the `<|eom|>` tag. It uses this to synthesize an appropriate response to the query.
-
-
diff --git a/llama_stack/models/llama/llama4/prompts.py b/llama_stack/models/llama/llama4/prompts.py
index 5025482eb..56b3f5303 100644
--- a/llama_stack/models/llama/llama4/prompts.py
+++ b/llama_stack/models/llama/llama4/prompts.py
@@ -74,17 +74,13 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
         Llama4UseCase(
             title="Text completion - Paris information",
             description="Text completion for Llama 4 base model uses this format.",
-            dialogs=[
-                TextCompletionContent(content="The capital of France is Paris")
-            ],
+            dialogs=[TextCompletionContent(content="The capital of France is Paris")],
         ),
         Llama4UseCase(
             title="Text completion - The color of the sky",
             description="Text completion for Llama 4 base model uses this format.",
             dialogs=[
-                TextCompletionContent(
-                    content="The color of the sky is blue but sometimes it can also be"
-                )
+                TextCompletionContent(content="The color of the sky is blue but sometimes it can also be")
             ],
             notes="",
         ),
@@ -111,9 +107,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
             description="Here is a regular multi-turn user assistant conversation and how its formatted.",
             dialogs=[
                 [
-                    RawMessage(
-                        role="system", content="You are a helpful assistant"
-                    ),
+                    RawMessage(role="system", content="You are a helpful assistant"),
                     RawMessage(
                         role="user",
                         content="Answer who are you in the form of jeopardy?",
@@ -133,9 +127,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                         role="user",
                         content=[
                             RawMediaItem(data=BytesIO(img_small_dog)),
-                            RawTextItem(
-                                text="Describe this image in two sentences"
-                            ),
+                            RawTextItem(text="Describe this image in two sentences"),
                         ],
                     )
                 ]
@@ -157,9 +149,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                         role="user",
                         content=[
                             RawMediaItem(data=BytesIO(img_dog)),
-                            RawTextItem(
-                                text="Describe this image in two sentences"
-                            ),
+                            RawTextItem(text="Describe this image in two sentences"),
                         ],
                     )
                 ]
@@ -181,9 +171,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                         content=[
                             RawMediaItem(data=BytesIO(img_dog)),
                             RawMediaItem(data=BytesIO(img_pasta)),
-                            RawTextItem(
-                                text="Describe these images in two sentences"
-                            ),
+                            RawTextItem(text="Describe these images in two sentences"),
                         ],
                     )
                 ]