Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-05 10:13:05 +00:00
Commit 53f29eb8b9 ("appease the lint gods"), parent f436348124

2 changed files with 8 additions and 22 deletions
The first changed file is the Llama 4 prompt-format documentation (Markdown):

````diff
@@ -76,7 +76,7 @@ Notice the structure of the image section:
 <|image_start|><|image|><|patch|>...<|patch|><|image_end|>
 ```
 This is due to the image being smaller than the tile size.

 ## Single image prompt format

````
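For reference, the single-tile layout in the hunk above can be assembled mechanically from the documented special tokens. The sketch below is illustrative only: the helper name and the patch count are made up, and the real tokenizer derives the number of patches from the resized image.

```python
# Illustrative sketch (not from this commit): the image section for an image that
# fits in a single tile, per the layout documented above.
def single_tile_image_section(num_patches: int) -> str:
    return "<|image_start|><|image|>" + "<|patch|>" * num_patches + "<|image_end|>"


print(single_tile_image_section(4))
# <|image_start|><|image|><|patch|><|patch|><|patch|><|patch|><|image_end|>
```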
````diff
@@ -102,7 +102,7 @@ With a bigger image, the image will include the tile separator tokens. Additiona
 ```
 <|image_start|><|patch|>...<|patch|><|tile_x_separator|><|patch|>...<|patch|><|tile_y_separator|><|patch|>...<|patch|><|image|><|patch|>...<|patch|><|image_end|>
 ```

 ## Multiple images prompt format

````
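The tiled layout above can be sketched the same way. This is a rough reconstruction from the documented token sequence: it assumes `<|tile_x_separator|>` sits between tiles within a row and `<|tile_y_separator|>` between rows, with the global `<|image|>` thumbnail patches at the end; the actual separator placement in the tokenizer may differ.

```python
# Illustrative sketch (not from this commit): image section for an image split into
# a rows x cols grid of tiles, followed by the global <|image|> thumbnail patches.
# Separator placement is inferred from the documentation and may not be exact.
def tiled_image_section(rows: int, cols: int, patches_per_tile: int, global_patches: int) -> str:
    tile = "<|patch|>" * patches_per_tile
    row = "<|tile_x_separator|>".join([tile] * cols)
    grid = "<|tile_y_separator|>".join([row] * rows)
    return "<|image_start|>" + grid + "<|image|>" + "<|patch|>" * global_patches + "<|image_end|>"
```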
````diff
@@ -321,5 +321,3 @@ The top 2 latest trending songs are:
 
 - Tool outputs should be passed back to the model in the `tool` (a.k.a. `ipython`) role.
 - The model parses the tool output contents until it encounters the `<|eom|>` tag. It uses this to synthesize an appropriate response to the query.
-
-
````
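As a minimal sketch of the tool-output convention described in those bullets, the turn fed back to the model could look like the following. `RawMessage` is the message type used in the Python file changed by this commit; the import path and the tool output string are assumptions for illustration.

```python
# Hypothetical tool-result turn (not from this commit): the tool's output is passed
# back in the "tool" (a.k.a. "ipython") role, and the model reads it up to <|eom|>
# before synthesizing its final answer.
# RawMessage comes from the same datatypes module used by the use-case file below;
# the exact import path is not shown in this diff.
tool_turn = RawMessage(
    role="tool",
    content='{"trending_songs": ["Song A", "Song B"]}',
)
```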
The second changed file is the Python module that generates the prompt-format use cases:

```diff
@@ -74,17 +74,13 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
             Llama4UseCase(
                 title="Text completion - Paris information",
                 description="Text completion for Llama 4 base model uses this format.",
-                dialogs=[
-                    TextCompletionContent(content="The capital of France is Paris")
-                ],
+                dialogs=[TextCompletionContent(content="The capital of France is Paris")],
             ),
             Llama4UseCase(
                 title="Text completion - The color of the sky",
                 description="Text completion for Llama 4 base model uses this format.",
                 dialogs=[
-                    TextCompletionContent(
-                        content="The color of the sky is blue but sometimes it can also be"
-                    )
+                    TextCompletionContent(content="The color of the sky is blue but sometimes it can also be")
                 ],
                 notes="",
             ),
@@ -111,9 +107,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                 description="Here is a regular multi-turn user assistant conversation and how its formatted.",
                 dialogs=[
                     [
-                        RawMessage(
-                            role="system", content="You are a helpful assistant"
-                        ),
+                        RawMessage(role="system", content="You are a helpful assistant"),
                         RawMessage(
                             role="user",
                             content="Answer who are you in the form of jeopardy?",
@@ -133,9 +127,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                         role="user",
                         content=[
                             RawMediaItem(data=BytesIO(img_small_dog)),
-                            RawTextItem(
-                                text="Describe this image in two sentences"
-                            ),
+                            RawTextItem(text="Describe this image in two sentences"),
                         ],
                     )
                 ]
@@ -157,9 +149,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                         role="user",
                         content=[
                             RawMediaItem(data=BytesIO(img_dog)),
-                            RawTextItem(
-                                text="Describe this image in two sentences"
-                            ),
+                            RawTextItem(text="Describe this image in two sentences"),
                         ],
                     )
                 ]
@@ -181,9 +171,7 @@ def usecases(base_model: bool = False) -> List[UseCase | str]:
                         content=[
                             RawMediaItem(data=BytesIO(img_dog)),
                             RawMediaItem(data=BytesIO(img_pasta)),
-                            RawTextItem(
-                                text="Describe these images in two sentences"
-                            ),
+                            RawTextItem(text="Describe these images in two sentences"),
                         ],
                     )
                 ]
```
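New entries added after this commit would follow the same collapsed style, with call arguments kept on one line whenever they fit within the formatter's line-length limit. The entry below is hypothetical (not part of this commit); the class and field names are taken from the diff above, and the imports mirror those in the changed file.

```python
# Hypothetical additional use case written in the post-lint style: a single-element
# dialogs list collapses onto one line instead of spanning three.
extra_use_case = Llama4UseCase(
    title="Text completion - Largest ocean",
    description="Text completion for Llama 4 base model uses this format.",
    dialogs=[TextCompletionContent(content="The largest ocean on Earth is the Pacific")],
    notes="",
)
```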