fix: notebook vision inference (#1423)

# What does this PR do?

- Update the notebook to use the library client throughout (minimal sketch of the new flow below).
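
For reviewers, a minimal sketch of the updated flow — assuming the `together` distribution template, a `TOGETHER_API_KEY` in the environment, and a local `Llama_Repo.jpeg`; the exact data-URL prefix used by the notebook's helper may differ:

```python
# Sketch only: mirrors the notebook's new vision-inference cell, using the
# in-process library client instead of a remote Llama Stack server URL.
import base64

from llama_stack.distribution.library_client import LlamaStackAsLibraryClient

# Build and initialize the "together" distribution in-process
# (requires TOGETHER_API_KEY to be set in the environment).
client = LlamaStackAsLibraryClient("together")
client.initialize()

vision_model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"

def encode_image(image_path: str) -> str:
    # Return the image as a base64 data URL (JPEG assumed here).
    with open(image_path, "rb") as image_file:
        encoded = base64.b64encode(image_file.read()).decode("utf-8")
    return f"data:image/jpeg;base64,{encoded}"

response = client.inference.chat_completion(
    messages=[{
        "role": "user",
        "content": [
            {"type": "image", "image": {"url": {"uri": encode_image("Llama_Repo.jpeg")}}},
            {"type": "text", "text": "How many different colors are those llamas?"},
        ],
    }],
    model_id=vision_model_id,
    stream=False,
)
print(response.completion_message.content)
```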

cc @jeffxtang 

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan
```
pytest -v -s --nbval-lax ./docs/getting_started.ipynb
```

[//]: # (## Documentation)
Xi Yan 2025-03-06 13:40:21 -08:00 committed by GitHub
parent 46bc5f4a7a
commit 1a95271fab


@@ -141,7 +141,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 18,
"id": "E1UFuJC570Tk", "id": "E1UFuJC570Tk",
"metadata": { "metadata": {
"colab": { "colab": {
@@ -326,54 +326,108 @@
" type: sqlite\n", " type: sqlite\n",
"models:\n", "models:\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-8B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-8B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-8B-Instruct\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-8B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-8B-Instruct-Turbo\n", " provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-8B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-70B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-70B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-70B-Instruct\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-70B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-70B-Instruct-Turbo\n", " provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-70B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-405B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-405B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-405B-Instruct-FP8\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-405B-Instruct-FP8\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-405B-Instruct-Turbo\n", " provider_model_id: meta-llama/Meta-Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.1</span>-405B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-3B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-3B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-3B-Instruct\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-3B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-3B-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-3B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-11B-Vision-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-11B-Vision-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-11B-Vision-Instruct\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-11B-Vision-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-11B-Vision-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-11B-Vision-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-90B-Vision-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-90B-Vision-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-90B-Vision-Instruct\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-90B-Vision-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-90B-Vision-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.2</span>-90B-Vision-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.3</span>-70B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.3</span>-70B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.3</span>-70B-Instruct\n", " model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.3</span>-70B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.3</span>-70B-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3.3</span>-70B-Instruct-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Meta-Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-8B\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-8B\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-8B\n", " model_id: meta-llama/Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-8B\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-8B\n", " provider_model_id: meta-llama/Meta-Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-8B\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n", "- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-11B-Vision-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-11B-Vision-Turbo\n",
"- metadata: <span style=\"font-weight: bold\">{}</span>\n",
" model_id: meta-llama/Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-11B-Vision\n", " model_id: meta-llama/Llama-Guard-<span style=\"color: #008080; text-decoration-color: #008080; font-weight: bold\">3</span>-11B-Vision\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
@@ -473,6 +527,9 @@
" - config: <span style=\"font-weight: bold\">{}</span>\n", " - config: <span style=\"font-weight: bold\">{}</span>\n",
" provider_id: model-context-protocol\n", " provider_id: model-context-protocol\n",
" provider_type: remote::model-context-protocol\n", " provider_type: remote::model-context-protocol\n",
" - config: <span style=\"font-weight: bold\">{}</span>\n",
" provider_id: wolfram-alpha\n",
" provider_type: remote::wolfram-alpha\n",
" vector_io:\n", " vector_io:\n",
" - config:\n", " - config:\n",
" kvstore:\n", " kvstore:\n",
@@ -504,6 +561,10 @@
" mcp_endpoint: null\n", " mcp_endpoint: null\n",
" provider_id: code-interpreter\n", " provider_id: code-interpreter\n",
" toolgroup_id: builtin::code_interpreter\n", " toolgroup_id: builtin::code_interpreter\n",
"- args: null\n",
" mcp_endpoint: null\n",
" provider_id: wolfram-alpha\n",
" toolgroup_id: builtin::wolfram_alpha\n",
"vector_dbs: <span style=\"font-weight: bold\">[]</span>\n", "vector_dbs: <span style=\"font-weight: bold\">[]</span>\n",
"version: <span style=\"color: #008000; text-decoration-color: #008000\">'2'</span>\n", "version: <span style=\"color: #008000; text-decoration-color: #008000\">'2'</span>\n",
"\n", "\n",
@@ -530,54 +591,108 @@
" type: sqlite\n", " type: sqlite\n",
"models:\n", "models:\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct\n", " model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct-Turbo\n", " provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-8B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct\n", " model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct-Turbo\n", " provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-70B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-FP8\n", " model_id: meta-llama/Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-FP8\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-Turbo\n", " provider_model_id: meta-llama/Meta-Llama-\u001b[1;36m3.1\u001b[0m-405B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct\n", " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-3B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct\n", " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-11B-Vision-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct\n", " model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-\u001b[1;36m3.2\u001b[0m-90B-Vision-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct\n", " model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct-Turbo\n", " provider_model_id: meta-llama/Llama-\u001b[1;36m3.3\u001b[0m-70B-Instruct-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Meta-Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n", " model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
" provider_id: together\n", " provider_id: together\n",
" provider_model_id: meta-llama/Meta-Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n", " provider_model_id: meta-llama/Meta-Llama-Guard-\u001b[1;36m3\u001b[0m-8B\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", "- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-11B-Vision-Turbo\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n",
" provider_id: together\n",
" provider_model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-11B-Vision-Turbo\n",
"- metadata: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-11B-Vision\n", " model_id: meta-llama/Llama-Guard-\u001b[1;36m3\u001b[0m-11B-Vision\n",
" model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n", " model_type: !!python/object/apply:llama_stack.apis.models.models.ModelType\n",
" - llm\n", " - llm\n",
@@ -677,6 +792,9 @@
" - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n", " - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" provider_id: model-context-protocol\n", " provider_id: model-context-protocol\n",
" provider_type: remote::model-context-protocol\n", " provider_type: remote::model-context-protocol\n",
" - config: \u001b[1m{\u001b[0m\u001b[1m}\u001b[0m\n",
" provider_id: wolfram-alpha\n",
" provider_type: remote::wolfram-alpha\n",
" vector_io:\n", " vector_io:\n",
" - config:\n", " - config:\n",
" kvstore:\n", " kvstore:\n",
@@ -708,6 +826,10 @@
" mcp_endpoint: null\n", " mcp_endpoint: null\n",
" provider_id: code-interpreter\n", " provider_id: code-interpreter\n",
" toolgroup_id: builtin::code_interpreter\n", " toolgroup_id: builtin::code_interpreter\n",
"- args: null\n",
" mcp_endpoint: null\n",
" provider_id: wolfram-alpha\n",
" toolgroup_id: builtin::wolfram_alpha\n",
"vector_dbs: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n", "vector_dbs: \u001b[1m[\u001b[0m\u001b[1m]\u001b[0m\n",
"version: \u001b[32m'2'\u001b[0m\n", "version: \u001b[32m'2'\u001b[0m\n",
"\n" "\n"
@@ -4098,7 +4220,7 @@
"source": [ "source": [
"## 4. Image Understanding with Llama 3.2\n", "## 4. Image Understanding with Llama 3.2\n",
"\n", "\n",
"Below is a complete example of using Together's Llama Stack 0.1 server at https://llama-stack.together.ai to ask Llama 3.2 questions about an image." "Below is a complete example of to ask Llama 3.2 questions about an image."
] ]
}, },
{ {
@@ -4106,14 +4228,12 @@
"id": "82e381ec", "id": "82e381ec",
"metadata": {}, "metadata": {},
"source": [ "source": [
"### 4.1 Setup and helpers\n", "### 4.1 Setup and helpers\n"
"\n",
"Below we install the Llama Stack client 0.1, download the example image, define two image helpers, and set Llama Stack Together server URL and Llama 3.2 model name.\n"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 3, "execution_count": 1,
"id": "44e05e16", "id": "44e05e16",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
@@ -4123,7 +4243,7 @@
"text": [ "text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n", " % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n", " Dload Upload Total Spent Left Speed\n",
"100 275k 100 275k 0 0 780k 0 --:--:-- --:--:-- --:--:-- 780k\n" "100 275k 100 275k 0 0 905k 0 --:--:-- --:--:-- --:--:-- 906k\n"
] ]
} }
], ],
@@ -4133,32 +4253,13 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 20,
"id": "469750f7",
"metadata": {},
"outputs": [],
"source": [
"# NBVAL_SKIP\n",
"from PIL import Image\n",
"import matplotlib.pyplot as plt\n",
"\n",
"def display_image(path):\n",
" img = Image.open(path)\n",
" plt.imshow(img)\n",
" plt.axis('off')\n",
" plt.show()\n",
"\n",
"display_image(\"Llama_Repo.jpeg\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a2c1e1c2", "id": "a2c1e1c2",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"import base64\n", "import base64\n",
"vision_model_id = \"meta-llama/Llama-3.2-11B-Vision-Instruct\"\n",
"\n", "\n",
"def encode_image(image_path):\n", "def encode_image(image_path):\n",
" with open(image_path, \"rb\") as image_file:\n", " with open(image_path, \"rb\") as image_file:\n",
@@ -4167,19 +4268,6 @@
" return base64_url" " return base64_url"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"id": "c565f99e",
"metadata": {},
"outputs": [],
"source": [
"from llama_stack_client import LlamaStackClient\n",
"\n",
"LLAMA_STACK_API_TOGETHER_URL=\"https://llama-stack.together.ai\"\n",
"LLAMA32_11B_INSTRUCT = \"meta-llama/Llama-3.2-11B-Vision-Instruct\""
]
},
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "7737cd41", "id": "7737cd41",
@@ -4192,55 +4280,44 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 21,
"id": "d7914894", "id": "d7914894",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"There are three llamas in the image. The llama in the middle is purple, the llama on the left is white, and the llama on the right is also white, but it is wearing a blue party hat. Therefore, there are two different colors of llama in the image: purple and white.\n"
]
}
],
"source": [ "source": [
"from llama_stack_client.lib.inference.event_logger import EventLogger\n", "response = client.inference.chat_completion(\n",
"\n", " messages=[\n",
"async def run_main(image_path: str, prompt):\n", " {\n",
" client = LlamaStackClient(\n", " \"role\": \"user\",\n",
" base_url=LLAMA_STACK_API_TOGETHER_URL,\n", " \"content\": [\n",
" )\n", " {\n",
"\n", " \"type\": \"image\",\n",
" message = {\n", " \"image\": {\n",
" \"role\": \"user\",\n", " \"url\": {\n",
" \"content\": [\n", " \"uri\": encode_image(\"Llama_Repo.jpeg\")\n",
" {\n", " }\n",
" \"type\": \"image\",\n", " }\n",
" \"image\": {\n", " },\n",
" \"url\": {\n", " {\n",
" \"uri\": encode_image(image_path)\n", " \"type\": \"text\",\n",
" }\n", " \"text\": \"How many different colors are those llamas? What are those colors?\",\n",
" }\n", " }\n",
" },\n", " ]\n",
" {\n", " }\n",
" \"type\": \"text\",\n", " ],\n",
" \"text\": prompt,\n", " model_id=vision_model_id,\n",
" }\n", " stream=False,\n",
" ]\n", ")\n",
" }\n",
"\n", "\n",
" response = client.inference.chat_completion(\n", "print(response.completion_message.content)"
" messages=[message],\n",
" model_id=LLAMA32_11B_INSTRUCT,\n",
" stream=False,\n",
" )\n",
"\n",
" print(response.completion_message.content.lower().strip())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ee09b97",
"metadata": {},
"outputs": [],
"source": [
"await run_main(\"Llama_Repo.jpeg\",\n",
" \"How many different colors are those llamas?\\\n",
" What are those colors?\")"
] ]
}, },
{ {
@@ -4255,68 +4332,65 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 19,
"id": "f9a83275", "id": "f9a83275",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33minference> \u001b[0m\u001b[33mThere\u001b[0m\u001b[33m are\u001b[0m\u001b[33m three\u001b[0m\u001b[33m different\u001b[0m\u001b[33m colors\u001b[0m\u001b[33m of\u001b[0m\u001b[33m ll\u001b[0m\u001b[33mamas\u001b[0m\u001b[33m in\u001b[0m\u001b[33m the\u001b[0m\u001b[33m image\u001b[0m\u001b[33m.\u001b[0m\u001b[33m The\u001b[0m\u001b[33m first\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m on\u001b[0m\u001b[33m the\u001b[0m\u001b[33m left\u001b[0m\u001b[33m is\u001b[0m\u001b[33m white\u001b[0m\u001b[33m,\u001b[0m\u001b[33m the\u001b[0m\u001b[33m second\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m in\u001b[0m\u001b[33m the\u001b[0m\u001b[33m middle\u001b[0m\u001b[33m is\u001b[0m\u001b[33m purple\u001b[0m\u001b[33m,\u001b[0m\u001b[33m and\u001b[0m\u001b[33m the\u001b[0m\u001b[33m third\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m on\u001b[0m\u001b[33m the\u001b[0m\u001b[33m right\u001b[0m\u001b[33m is\u001b[0m\u001b[33m white\u001b[0m\u001b[33m with\u001b[0m\u001b[33m a\u001b[0m\u001b[33m blue\u001b[0m\u001b[33m party\u001b[0m\u001b[33m hat\u001b[0m\u001b[33m.\u001b[0m\u001b[97m\u001b[0m\n",
"\u001b[30m\u001b[0m"
]
}
],
"source": [ "source": [
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n", "from llama_stack_client.types.agent_create_params import AgentConfig\n",
"\n", "\n",
"async def run_main(image_path, prompt):\n", "agent_config = AgentConfig(\n",
" base64_image = encode_image(image_path)\n", " model=vision_model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" enable_session_persistence=False,\n",
" toolgroups=[],\n",
")\n",
"\n", "\n",
" client = LlamaStackClient(\n", "agent = Agent(client, agent_config)\n",
" base_url=LLAMA_STACK_API_TOGETHER_URL,\n", "session_id = agent.create_session(\"test-session\")\n",
" )\n",
"\n", "\n",
" agent_config = AgentConfig(\n", "response = agent.create_turn(\n",
" model=LLAMA32_11B_INSTRUCT,\n", " messages=[{\n",
" instructions=\"You are a helpful assistant\",\n", " \"role\": \"user\",\n",
" enable_session_persistence=False,\n", " \"content\": [\n",
" toolgroups=[],\n", " {\n",
" )\n", " \"type\": \"image\",\n",
"\n", " \"image\": {\n",
" agent = Agent(client, agent_config)\n", " \"url\": {\n",
" session_id = agent.create_session(\"test-session\")\n", " \"uri\": encode_image(\"Llama_Repo.jpeg\")\n",
"\n", " }\n",
" response = agent.create_turn(\n",
" messages=[{\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"image\",\n",
" \"image\": {\n",
" \"url\": {\n",
" \"uri\": encode_image(image_path)\n",
" }\n",
" }\n",
" },\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": prompt,\n",
" }\n", " }\n",
" ]\n", " },\n",
" }],\n", " {\n",
" session_id=session_id,\n", " \"type\": \"text\",\n",
" )\n", " \"text\": \"How many different colors are those llamas? What are those colors?\",\n",
" }\n",
" ]\n",
" }],\n",
" session_id=session_id,\n",
")\n",
"\n", "\n",
" for log in EventLogger().log(response):\n", "for log in EventLogger().log(response):\n",
" log.print()" " log.print()\n",
" "
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "15d0098b", "id": "f3352379",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": []
"await run_main(\"Llama_Repo.jpeg\",\n",
" \"How many different colors are those llamas?\\\n",
" What are those colors?\")"
]
} }
], ],
"metadata": { "metadata": {