Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-31 09:23:54 +00:00)

Merge branch 'refs/heads/main' into preprocessors

# Conflicts:
#	llama_stack/distribution/routers/routers.py
#	llama_stack/templates/ollama/build.yaml
#	llama_stack/templates/ollama/run-with-safety.yaml
#	llama_stack/templates/ollama/run.yaml
#	llama_stack/templates/remote-vllm/build.yaml
#	llama_stack/templates/remote-vllm/run-with-safety.yaml
#	llama_stack/templates/remote-vllm/run.yaml
#	llama_stack/templates/together/build.yaml
#	llama_stack/templates/together/run-with-safety.yaml
#	llama_stack/templates/together/run.yaml

commit 6b9f673fdb

313 changed files with 181388 additions and 7064 deletions
1398	docs/_static/llama-stack-spec.html (vendored): file diff suppressed because it is too large
1112	docs/_static/llama-stack-spec.yaml (vendored): file diff suppressed because it is too large
@@ -141,7 +141,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 18,
"id": "E1UFuJC570Tk",
"metadata": {
"colab": {
@@ -326,54 +326,108 @@
" type: sqlite\n",
"models:\n",
[The rest of this hunk is the notebook's rich-HTML rendering of the run config's model registry: repeated blocks of the form "- metadata: {}", "model_id: ...", "model_type: ... - llm", "provider_id: together", "provider_model_id: ...", covering Llama 3.1 8B/70B/405B Instruct, Llama 3.2 3B/11B/90B (Vision) Instruct, Llama 3.3 70B Instruct, and Llama Guard 3 8B / 11B Vision, each mapped to the corresponding Together -Turbo/-FP8 provider model id.]
@@ -473,6 +527,9 @@
" - config: {}\n",
" provider_id: model-context-protocol\n",
" provider_type: remote::model-context-protocol\n",
" - config: {}\n",
" provider_id: wolfram-alpha\n",
" provider_type: remote::wolfram-alpha\n",
" vector_io:\n",
" - config:\n",
" kvstore:\n",
@@ -504,6 +561,10 @@
" mcp_endpoint: null\n",
" provider_id: code-interpreter\n",
" toolgroup_id: builtin::code_interpreter\n",
"- args: null\n",
" mcp_endpoint: null\n",
" provider_id: wolfram-alpha\n",
" toolgroup_id: builtin::wolfram_alpha\n",
"vector_dbs: []\n",
"version: '2'\n",
"\n",
@@ -530,54 +591,108 @@
" type: sqlite\n",
"models:\n",
[Same model registry listing as in the previous hunk, rendered here as the notebook's plain-text output with ANSI color escape codes.]
@@ -677,6 +792,9 @@
" - config: {}\n",
" provider_id: model-context-protocol\n",
" provider_type: remote::model-context-protocol\n",
" - config: {}\n",
" provider_id: wolfram-alpha\n",
" provider_type: remote::wolfram-alpha\n",
" vector_io:\n",
" - config:\n",
" kvstore:\n",
@@ -708,6 +826,10 @@
" mcp_endpoint: null\n",
" provider_id: code-interpreter\n",
" toolgroup_id: builtin::code_interpreter\n",
"- args: null\n",
" mcp_endpoint: null\n",
" provider_id: wolfram-alpha\n",
" toolgroup_id: builtin::wolfram_alpha\n",
"vector_dbs: []\n",
"version: '2'\n",
"\n"
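For readers reproducing the notebook, the same registry can also be inspected programmatically instead of reading the raw run config dump. A minimal sketch, assuming a configured `LlamaStackClient` named `client`; the field names on the returned objects are assumptions, not taken from this diff:

```python
# Hypothetical helper: print the models registered with the running stack.
for model in client.models.list():
    print(model.identifier, "->", model.provider_id)
```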
@@ -1513,18 +1635,14 @@
"source": [
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from termcolor import cprint\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" toolgroups=[\"builtin::websearch\"],\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" tools=[\"builtin::websearch\"],\n",
")\n",
"agent = Agent(client, agent_config)\n",
"user_prompts = [\n",
" \"Hello\",\n",
" \"Which teams played in the NBA western conference finals of 2024\",\n",
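This hunk, and the similar agent cells further down in the notebook, all apply one migration: the separate `AgentConfig` object is dropped, its fields move onto the `Agent` constructor, and `toolgroups` is renamed to `tools`. A condensed before/after sketch of the pattern, assuming the notebook's `client` and `model_id` from earlier cells:

```python
from llama_stack_client.lib.agents.agent import Agent

# Before this change: a config object plus a two-step construction.
# from llama_stack_client.types.agent_create_params import AgentConfig
# agent_config = AgentConfig(
#     model=model_id,
#     instructions="You are a helpful assistant",
#     toolgroups=["builtin::websearch"],
#     input_shields=[],
#     output_shields=[],
#     enable_session_persistence=False,
# )
# agent = Agent(client, agent_config)

# After this change: configuration is passed directly to Agent.
agent = Agent(
    client,
    model=model_id,
    instructions="You are a helpful assistant",
    tools=["builtin::websearch"],
)
```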
@@ -1693,7 +1811,6 @@
"import uuid\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from termcolor import cprint\n",
"from llama_stack_client.types import Document\n",
"\n",
@@ -1719,11 +1836,11 @@
" vector_db_id=vector_db_id,\n",
" chunk_size_in_tokens=512,\n",
")\n",
"agent_config = AgentConfig(\n",
"rag_agent = Agent(\n",
" client, \n",
" model=model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" enable_session_persistence=False,\n",
" toolgroups = [\n",
" tools = [\n",
" {\n",
" \"name\": \"builtin::rag/knowledge_search\",\n",
" \"args\" : {\n",
@@ -1732,7 +1849,6 @@
" }\n",
" ],\n",
")\n",
"rag_agent = Agent(client, agent_config)\n",
"session_id = rag_agent.create_session(\"test-session\")\n",
"user_prompts = [\n",
" \"What are the top 5 topics that were explained? Only list succinct bullet points.\",\n",
@@ -1856,23 +1972,19 @@
"source": [
"from llama_stack_client.types.agents.turn_create_params import Document\n",
"\n",
"agent_config = AgentConfig(\n",
"codex_agent = Agent(\n",
" client, \n",
" model=\"meta-llama/Llama-3.1-8B-Instruct\",\n",
" instructions=\"You are a helpful assistant\",\n",
" tools=[\n",
" \"builtin::code_interpreter\",\n",
" \"builtin::websearch\"\n",
" ],\n",
" sampling_params = {\n",
" \"max_tokens\" : 4096,\n",
" \"temperature\": 0.0\n",
" },\n",
" model=\"meta-llama/Llama-3.1-8B-Instruct\",\n",
" instructions=\"You are a helpful assistant\",\n",
" toolgroups=[\n",
" \"builtin::code_interpreter\",\n",
" \"builtin::websearch\"\n",
" ],\n",
" tool_choice=\"auto\",\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
")\n",
"codex_agent = Agent(client, agent_config)\n",
"session_id = codex_agent.create_session(\"test-session\")\n",
"\n",
"\n",
@@ -2782,18 +2894,14 @@
"# NBVAL_SKIP\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"from termcolor import cprint\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
" toolgroups=[\"mcp::filesystem\"],\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" tools=[\"mcp::filesystem\"],\n",
")\n",
"agent = Agent(client, agent_config)\n",
"user_prompts = [\n",
" \"Hello\",\n",
" \"list all the files /content\",\n",
@@ -2888,17 +2996,13 @@
"source": [
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"\n",
"agent_config = AgentConfig(\n",
"agent = Agent(\n",
" client, \n",
" model=\"meta-llama/Llama-3.3-70B-Instruct\",\n",
" instructions=\"You are a helpful assistant. Use search tool to answer the questions. \",\n",
" toolgroups=[\"builtin::websearch\"],\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" tools=[\"builtin::websearch\"],\n",
")\n",
"agent = Agent(client, agent_config)\n",
"user_prompts = [\n",
" \"Which teams played in the NBA western conference finals of 2024. Search the web for the answer.\",\n",
" \"In which episode and season of South Park does Bill Cosby (BSM-471) first appear? Give me the number and title. Search the web for the answer.\",\n",
@@ -4098,7 +4202,7 @@
"source": [
"## 4. Image Understanding with Llama 3.2\n",
"\n",
"Below is a complete example of using Together's Llama Stack 0.1 server at https://llama-stack.together.ai to ask Llama 3.2 questions about an image."
"Below is a complete example of to ask Llama 3.2 questions about an image."
]
},
{
@@ -4106,14 +4210,12 @@
"id": "82e381ec",
"metadata": {},
"source": [
"### 4.1 Setup and helpers\n",
"\n",
"Below we install the Llama Stack client 0.1, download the example image, define two image helpers, and set Llama Stack Together server URL and Llama 3.2 model name.\n"
"### 4.1 Setup and helpers\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 1,
"id": "44e05e16",
"metadata": {},
"outputs": [
@@ -4123,7 +4225,7 @@
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 275k 100 275k 0 0 780k 0 --:--:-- --:--:-- --:--:-- 780k\n"
"100 275k 100 275k 0 0 905k 0 --:--:-- --:--:-- --:--:-- 906k\n"
]
}
],
@@ -4133,32 +4235,13 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "469750f7",
"metadata": {},
"outputs": [],
"source": [
"# NBVAL_SKIP\n",
"from PIL import Image\n",
"import matplotlib.pyplot as plt\n",
"\n",
"def display_image(path):\n",
" img = Image.open(path)\n",
" plt.imshow(img)\n",
" plt.axis('off')\n",
" plt.show()\n",
"\n",
"display_image(\"Llama_Repo.jpeg\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 20,
"id": "a2c1e1c2",
"metadata": {},
"outputs": [],
"source": [
"import base64\n",
"vision_model_id = \"meta-llama/Llama-3.2-11B-Vision-Instruct\"\n",
"\n",
"def encode_image(image_path):\n",
" with open(image_path, \"rb\") as image_file:\n",
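The body of `encode_image` is truncated by this hunk (its `return base64_url` appears in the next one). A sketch of what such a helper typically looks like; the intermediate variable names and the JPEG MIME type are assumptions rather than the notebook's literal code:

```python
import base64


def encode_image(image_path: str) -> str:
    # Read the image bytes and wrap them in a data URL usable as an image "uri".
    with open(image_path, "rb") as image_file:
        base64_data = base64.b64encode(image_file.read()).decode("utf-8")
    base64_url = f"data:image/jpeg;base64,{base64_data}"
    return base64_url
```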
@@ -4167,19 +4250,6 @@
" return base64_url"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c565f99e",
"metadata": {},
"outputs": [],
"source": [
"from llama_stack_client import LlamaStackClient\n",
"\n",
"LLAMA_STACK_API_TOGETHER_URL=\"https://llama-stack.together.ai\"\n",
"LLAMA32_11B_INSTRUCT = \"meta-llama/Llama-3.2-11B-Vision-Instruct\""
]
},
{
"cell_type": "markdown",
"id": "7737cd41",
@@ -4192,55 +4262,44 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 21,
"id": "d7914894",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"There are three llamas in the image. The llama in the middle is purple, the llama on the left is white, and the llama on the right is also white, but it is wearing a blue party hat. Therefore, there are two different colors of llama in the image: purple and white.\n"
]
}
],
"source": [
"from llama_stack_client.lib.inference.event_logger import EventLogger\n",
"\n",
"async def run_main(image_path: str, prompt):\n",
" client = LlamaStackClient(\n",
" base_url=LLAMA_STACK_API_TOGETHER_URL,\n",
" )\n",
"\n",
" message = {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"image\",\n",
" \"image\": {\n",
" \"url\": {\n",
" \"uri\": encode_image(image_path)\n",
" }\n",
"response = client.inference.chat_completion(\n",
" messages=[\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"image\",\n",
" \"image\": {\n",
" \"url\": {\n",
" \"uri\": encode_image(\"Llama_Repo.jpeg\")\n",
" }\n",
" }\n",
" },\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": \"How many different colors are those llamas? What are those colors?\",\n",
" }\n",
" },\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": prompt,\n",
" }\n",
" ]\n",
" }\n",
" ]\n",
" }\n",
" ],\n",
" model_id=vision_model_id,\n",
" stream=False,\n",
")\n",
"\n",
" response = client.inference.chat_completion(\n",
" messages=[message],\n",
" model_id=LLAMA32_11B_INSTRUCT,\n",
" stream=False,\n",
" )\n",
"\n",
" print(response.completion_message.content.lower().strip())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4ee09b97",
"metadata": {},
"outputs": [],
"source": [
"await run_main(\"Llama_Repo.jpeg\",\n",
" \"How many different colors are those llamas?\\\n",
" What are those colors?\")"
"print(response.completion_message.content)"
]
},
{
@@ -4255,68 +4314,60 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 19,
"id": "f9a83275",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"inference> There are three different colors of llamas in the image. The first llama on the left is white, the second llama in the middle is purple, and the third llama on the right is white with a blue party hat.\n",
""
]
}
],
"source": [
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"agent = Agent(\n",
" client, \n",
" model=vision_model_id,\n",
" instructions=\"You are a helpful assistant\",\n",
")\n",
"session_id = agent.create_session(\"test-session\")\n",
"\n",
"async def run_main(image_path, prompt):\n",
" base64_image = encode_image(image_path)\n",
"\n",
" client = LlamaStackClient(\n",
" base_url=LLAMA_STACK_API_TOGETHER_URL,\n",
" )\n",
"\n",
" agent_config = AgentConfig(\n",
" model=LLAMA32_11B_INSTRUCT,\n",
" instructions=\"You are a helpful assistant\",\n",
" enable_session_persistence=False,\n",
" toolgroups=[],\n",
" )\n",
"\n",
" agent = Agent(client, agent_config)\n",
" session_id = agent.create_session(\"test-session\")\n",
"\n",
" response = agent.create_turn(\n",
" messages=[{\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"image\",\n",
" \"image\": {\n",
" \"url\": {\n",
" \"uri\": encode_image(image_path)\n",
" }\n",
" }\n",
" },\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": prompt,\n",
"response = agent.create_turn(\n",
" messages=[{\n",
" \"role\": \"user\",\n",
" \"content\": [\n",
" {\n",
" \"type\": \"image\",\n",
" \"image\": {\n",
" \"url\": {\n",
" \"uri\": encode_image(\"Llama_Repo.jpeg\")\n",
" }\n",
" }\n",
" ]\n",
" }],\n",
" session_id=session_id,\n",
" )\n",
" },\n",
" {\n",
" \"type\": \"text\",\n",
" \"text\": \"How many different colors are those llamas? What are those colors?\",\n",
" }\n",
" ]\n",
" }],\n",
" session_id=session_id,\n",
")\n",
"\n",
" for log in EventLogger().log(response):\n",
" log.print()"
"for log in EventLogger().log(response):\n",
" log.print()\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "15d0098b",
"id": "f3352379",
"metadata": {},
"outputs": [],
"source": [
"await run_main(\"Llama_Repo.jpeg\",\n",
" \"How many different colors are those llamas?\\\n",
" What are those colors?\")"
]
"source": []
}
],
"metadata": {
@@ -3675,7 +3675,7 @@
" benchmark_id=\"llama3.2-3B-instruct:tax_eval\",\n",
" input_rows=eval_rows.rows,\n",
" scoring_functions=[\"braintrust::answer-similarity\"],\n",
" task_config={\n",
" benchmark_config={\n",
" \"type\": \"benchmark\",\n",
" \"eval_candidate\": {\n",
" \"type\": \"model\",\n",
@@ -6383,7 +6383,7 @@
" benchmark_id=\"Llama-3.2-3B-Instruct-sft-0:tax_eval\",\n",
" input_rows=eval_rows.rows,\n",
" scoring_functions=[\"braintrust::answer-similarity\"],\n",
" task_config={\n",
" benchmark_config={\n",
" \"type\": \"benchmark\",\n",
" \"eval_candidate\": {\n",
" \"type\": \"model\",\n",
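Both hunks above make the same rename in `client.eval.evaluate_rows`: the `task_config` keyword becomes `benchmark_config` while everything else stays put. A hedged sketch of a call after this change; the `eval_candidate` body is abbreviated here and remains exactly as in the original cells:

```python
response = client.eval.evaluate_rows(
    benchmark_id="llama3.2-3B-instruct:tax_eval",
    input_rows=eval_rows.rows,
    scoring_functions=["braintrust::answer-similarity"],
    benchmark_config={  # previously passed as task_config={...}
        "type": "benchmark",
        "eval_candidate": {"type": "model"},  # remaining candidate fields unchanged
    },
)
```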
3535	docs/notebooks/Llama_Stack_Agent_Workflows.ipynb (new file): file diff suppressed because it is too large
@@ -781,7 +781,7 @@
" benchmark_id=\"meta-reference::mmmu\",\n",
" input_rows=eval_rows,\n",
" scoring_functions=[\"basic::regex_parser_multiple_choice_answer\"],\n",
" task_config={\n",
" benchmark_config={\n",
" \"type\": \"benchmark\",\n",
" \"eval_candidate\": {\n",
" \"type\": \"model\",\n",
@@ -826,10 +826,9 @@
"_ = client.datasets.register(\n",
" dataset_id=simpleqa_dataset_id,\n",
" provider_id=\"huggingface\",\n",
" url={\"uri\": \"https://huggingface.co/datasets/llamastack/evals\"},\n",
" url={\"uri\": \"https://huggingface.co/datasets/llamastack/simpleqa\"},\n",
" metadata={\n",
" \"path\": \"llamastack/evals\",\n",
" \"name\": \"evals__simpleqa\",\n",
" \"path\": \"llamastack/simpleqa\",\n",
" \"split\": \"train\",\n",
" },\n",
" dataset_schema={\n",
@@ -960,7 +959,7 @@
" benchmark_id=\"meta-reference::simpleqa\",\n",
" input_rows=eval_rows.rows,\n",
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
" task_config={\n",
" benchmark_config={\n",
" \"type\": \"benchmark\",\n",
" \"eval_candidate\": {\n",
" \"type\": \"model\",\n",
@@ -1109,7 +1108,7 @@
" benchmark_id=\"meta-reference::simpleqa\",\n",
" input_rows=eval_rows.rows,\n",
" scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
" task_config={\n",
" benchmark_config={\n",
" \"type\": \"benchmark\",\n",
" \"eval_candidate\": {\n",
" \"type\": \"agent\",\n",
@@ -3,7 +3,7 @@ The RFC Specification (OpenAPI format) is generated from the set of API endpoint
Please install the following packages before running the script:

```
pip install fire PyYAML llama-models
pip install fire PyYAML
```

Then simply run `sh run_openapi_generator.sh`
@@ -55,6 +55,7 @@ def main(output_dir: str):
            a set of endpoints and their corresponding interfaces that are tailored to
            best leverage Llama Models.""",
        ),
        include_standard_error_responses=True,
    ),
)

@@ -10,6 +10,7 @@ import typing
from dataclasses import make_dataclass
from typing import Any, Dict, Set, Union

from llama_stack.apis.datatypes import Error
from llama_stack.strong_typing.core import JsonType
from llama_stack.strong_typing.docstring import Docstring, parse_type
from llama_stack.strong_typing.inspection import (
@@ -434,6 +435,75 @@
        )
        self.schema_builder = SchemaBuilder(schema_generator)
        self.responses = {}

        # Create standard error responses
        self._create_standard_error_responses()

    def _create_standard_error_responses(self) -> None:
        """
        Creates standard error responses that can be reused across operations.
        These will be added to the components.responses section of the OpenAPI document.
        """
        # Get the Error schema
        error_schema = self.schema_builder.classdef_to_ref(Error)

        # Create standard error responses
        self.responses["BadRequest400"] = Response(
            description="The request was invalid or malformed",
            content={
                "application/json": MediaType(
                    schema=error_schema,
                    example={
                        "status": 400,
                        "title": "Bad Request",
                        "detail": "The request was invalid or malformed",
                    }
                )
            }
        )

        self.responses["TooManyRequests429"] = Response(
            description="The client has sent too many requests in a given amount of time",
            content={
                "application/json": MediaType(
                    schema=error_schema,
                    example={
                        "status": 429,
                        "title": "Too Many Requests",
                        "detail": "You have exceeded the rate limit. Please try again later.",
                    }
                )
            }
        )

        self.responses["InternalServerError500"] = Response(
            description="The server encountered an unexpected error",
            content={
                "application/json": MediaType(
                    schema=error_schema,
                    example={
                        "status": 500,
                        "title": "Internal Server Error",
                        "detail": "An unexpected error occurred. Our team has been notified.",
                    }
                )
            }
        )

        # Add a default error response for any unhandled error cases
        self.responses["DefaultError"] = Response(
            description="An unexpected error occurred",
            content={
                "application/json": MediaType(
                    schema=error_schema,
                    example={
                        "status": 0,
                        "title": "Error",
                        "detail": "An unexpected error occurred",
                    }
                )
            }
        )

    def _build_type_tag(self, ref: str, schema: Schema) -> Tag:
        # Don't include schema definition in the tag description because for one,
@@ -649,6 +719,18 @@
        responses.update(response_builder.build_response(response_options))

        assert len(responses.keys()) > 0, f"No responses found for {op.name}"

        # Add standard error response references
        if self.options.include_standard_error_responses:
            if "400" not in responses:
                responses["400"] = ResponseRef("BadRequest400")
            if "429" not in responses:
                responses["429"] = ResponseRef("TooManyRequests429")
            if "500" not in responses:
                responses["500"] = ResponseRef("InternalServerError500")
            if "default" not in responses:
                responses["default"] = ResponseRef("DefaultError")

        if op.event_type is not None:
            builder = ContentBuilder(self.schema_builder)
            callbacks = {
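The effect of the block above is simply to fill in shared error responses for any status code an operation does not already declare. An equivalent sketch of that fallback, written with `dict.setdefault` for compactness; this is an illustration of the behavior, not code from the repository:

```python
if self.options.include_standard_error_responses:
    for status, ref_name in [
        ("400", "BadRequest400"),
        ("429", "TooManyRequests429"),
        ("500", "InternalServerError500"),
        ("default", "DefaultError"),
    ]:
        # Only add the shared component response if the operation did not define its own.
        responses.setdefault(status, ResponseRef(ref_name))
```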
@@ -35,6 +35,7 @@ class Options:
    :param error_wrapper: True if errors are encapsulated in an error object wrapper.
    :param property_description_fun: Custom transformation function to apply to class property documentation strings.
    :param captions: User-defined captions for sections such as "Operations" or "Types", and (if applicable) groups of extra types.
    :param include_standard_error_responses: Whether to include standard error responses (400, 429, 500, 503) in all operations.
    """

    server: Server
@@ -52,6 +53,7 @@ class Options:
    error_wrapper: bool = False
    property_description_fun: Optional[Callable[[type, str, str], str]] = None
    captions: Optional[Dict[str, str]] = None
    include_standard_error_responses: bool = True

    default_captions: ClassVar[Dict[str, str]] = {
        "Operations": "Operations",
@@ -28,6 +28,5 @@ if [ ${#missing_packages[@]} -ne 0 ]; then
fi

stack_dir=$(dirname $(dirname $THIS_DIR))
models_dir=$(dirname $stack_dir)/llama-models
PYTHONPATH=$PYTHONPATH:$stack_dir:$models_dir \
PYTHONPATH=$PYTHONPATH:$stack_dir \
python -m docs.openapi_generator.generate $(dirname $THIS_DIR)/_static
@@ -11,3 +11,4 @@ sphinxcontrib-openapi
sphinxcontrib-redoc
sphinxcontrib-mermaid
sphinxcontrib-video
tomli
89	docs/source/building_applications/agent.md (new file)
@@ -0,0 +1,89 @@
# Llama Stack Agent Framework

The Llama Stack agent framework is built on a modular architecture that allows for flexible and powerful AI applications. This document explains the key components and how they work together.

## Core Concepts

### 1. Agent Configuration

Agents are configured using the `AgentConfig` class, which includes:

- **Model**: The underlying LLM to power the agent
- **Instructions**: System prompt that defines the agent's behavior
- **Tools**: Capabilities the agent can use to interact with external systems
- **Safety Shields**: Guardrails to ensure responsible AI behavior

```python
from llama_stack_client.lib.agents.agent import Agent


# Create the agent
agent = Agent(
    llama_stack_client,
    model="meta-llama/Llama-3-70b-chat",
    instructions="You are a helpful assistant that can use tools to answer questions.",
    tools=["builtin::code_interpreter", "builtin::rag/knowledge_search"],
)
```

### 2. Sessions

Agents maintain state through sessions, which represent a conversation thread:

```python
# Create a session
session_id = agent.create_session(session_name="My conversation")
```

### 3. Turns

Each interaction with an agent is called a "turn" and consists of:

- **Input Messages**: What the user sends to the agent
- **Steps**: The agent's internal processing (inference, tool execution, etc.)
- **Output Message**: The agent's response

```python
from llama_stack_client.lib.agents.event_logger import EventLogger

# Create a turn with streaming response
turn_response = agent.create_turn(
    session_id=session_id,
    messages=[{"role": "user", "content": "Tell me about Llama models"}],
)
for log in EventLogger().log(turn_response):
    log.print()
```

### Non-Streaming

```python
from rich.pretty import pprint

# Non-streaming API
response = agent.create_turn(
    session_id=session_id,
    messages=[{"role": "user", "content": "Tell me about Llama models"}],
    stream=False,
)
print("Inputs:")
pprint(response.input_messages)
print("Output:")
pprint(response.output_message.content)
print("Steps:")
pprint(response.steps)
```

### 4. Steps

Each turn consists of multiple steps that represent the agent's thought process:

- **Inference Steps**: The agent generating text responses
- **Tool Execution Steps**: The agent using tools to gather information
- **Shield Call Steps**: Safety checks being performed
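The sketch below shows one way to inspect these steps on a non-streaming turn; the attribute names on the step objects (for example `tool_calls`) are assumptions based on the other examples in these docs rather than a complete API reference:

```python
response = agent.create_turn(
    session_id=session_id,
    messages=[{"role": "user", "content": "Tell me about Llama models"}],
    stream=False,
)
for step in response.steps:
    if step.step_type == "inference":
        print("inference step")
    elif step.step_type == "tool_execution":
        # Hypothetical: list which tools the agent invoked in this step.
        print("tool step:", [call.tool_name for call in step.tool_calls])
    elif step.step_type == "shield_call":
        print("shield step")
```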

## Agent Execution Loop

Refer to the [Agent Execution Loop](agent_execution_loop) for more details on what happens within an agent turn.
@@ -13,7 +13,7 @@ Each agent turn follows these key steps:

3. **Inference Loop**: The agent enters its main execution loop:
   - The LLM receives a user prompt (with previous tool outputs)
   - The LLM generates a response, potentially with tool calls
   - The LLM generates a response, potentially with [tool calls](tools)
   - If tool calls are present:
     - Tool inputs are safety-checked
     - Tools are executed (e.g., web search, code execution)
@@ -67,20 +67,28 @@ sequenceDiagram
Each step in this process can be monitored and controlled through configurations. Here's an example that demonstrates monitoring the agent's execution:

```python
from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger
from rich.pretty import pprint

agent_config = AgentConfig(
# Replace host and port
client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}")

agent = Agent(
    client,
    # Check with `llama-stack-client models list`
    model="Llama3.2-3B-Instruct",
    instructions="You are a helpful assistant",
    # Enable both RAG and tool usage
    toolgroups=[
    tools=[
        {
            "name": "builtin::rag/knowledge_search",
            "args": {"vector_db_ids": ["my_docs"]},
        },
        "builtin::code_interpreter",
    ],
    # Configure safety
    # Configure safety (optional)
    input_shields=["llama_guard"],
    output_shields=["llama_guard"],
    # Control the inference loop
@@ -90,14 +98,12 @@ agent_config = AgentConfig(
        "max_tokens": 2048,
    },
)

agent = Agent(client, agent_config)
session_id = agent.create_session("monitored_session")

# Stream the agent's execution steps
response = agent.create_turn(
    messages=[{"role": "user", "content": "Analyze this code and run it"}],
    attachments=[
    documents=[
        {
            "content": "https://raw.githubusercontent.com/example/code.py",
            "mime_type": "text/plain",
@@ -108,14 +114,21 @@

# Monitor each step of execution
for log in EventLogger().log(response):
    if log.event.step_type == "memory_retrieval":
        print("Retrieved context:", log.event.retrieved_context)
    elif log.event.step_type == "inference":
        print("LLM output:", log.event.model_response)
    elif log.event.step_type == "tool_execution":
        print("Tool call:", log.event.tool_call)
        print("Tool response:", log.event.tool_response)
    elif log.event.step_type == "shield_call":
        if log.event.violation:
            print("Safety violation:", log.event.violation)
    log.print()

# Using non-streaming API, the response contains input, steps, and output.
response = agent.create_turn(
    messages=[{"role": "user", "content": "Analyze this code and run it"}],
    documents=[
        {
            "content": "https://raw.githubusercontent.com/example/code.py",
            "mime_type": "text/plain",
        }
    ],
    session_id=session_id,
)

pprint(f"Input: {response.input_messages}")
pprint(f"Output: {response.output_message.content}")
pprint(f"Steps: {response.steps}")
```
@ -1,170 +1,124 @@
|
|||
# Evals
|
||||
# Evaluations
|
||||
|
||||
[](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing)
|
||||
The Llama Stack provides a set of APIs in Llama Stack for supporting running evaluations of LLM applications.
|
||||
- `/datasetio` + `/datasets` API
|
||||
- `/scoring` + `/scoring_functions` API
|
||||
- `/eval` + `/benchmarks` API
|
||||
|
||||
Llama Stack provides the building blocks needed to run benchmark and application evaluations. This guide will walk you through how to use these components to run open benchmark evaluations. Visit our [Evaluation Concepts](../concepts/evaluation_concepts.md) guide for more details on how evaluations work in Llama Stack, and our [Evaluation Reference](../references/evals_reference/index.md) guide for a comprehensive reference on the APIs.
|
||||
|
||||
### 1. Open Benchmark Model Evaluation
|
||||
|
||||
This first example walks you through how to evaluate a model candidate served by Llama Stack on open benchmarks. We will use the following benchmark:
|
||||
- [MMMU](https://arxiv.org/abs/2311.16502) (A Massive Multi-discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI): Benchmark designed to evaluate multimodal models.
|
||||
- [SimpleQA](https://openai.com/index/introducing-simpleqa/): Benchmark designed to access models to answer short, fact-seeking questions.
|
||||
This guides walks you through the process of evaluating an LLM application built using Llama Stack. Checkout the [Evaluation Reference](../references/evals_reference/index.md) guide goes over the sets of APIs and developer experience flow of using Llama Stack to run evaluations for benchmark and application use cases. Checkout our Colab notebook on working examples with evaluations [here](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing).
|
||||
|
||||
#### 1.1 Running MMMU
|
||||
- We will use a pre-processed MMMU dataset from [llamastack/mmmu](https://huggingface.co/datasets/llamastack/mmmu). The preprocessing code is shown in in this [Github Gist](https://gist.github.com/yanxi0830/118e9c560227d27132a7fd10e2c92840). The dataset is obtained by transforming the original [MMMU/MMMU](https://huggingface.co/datasets/MMMU/MMMU) dataset into correct format by `inference/chat-completion` API.
|
||||
|
||||
## Application Evaluation
|
||||
|
||||
[](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb)
|
||||
|
||||
Llama Stack offers a library of scoring functions and the `/scoring` API, allowing you to run evaluations on your pre-annotated AI application datasets.
|
||||
|
||||
In this example, we will show you how to:
|
||||
1. Build an Agent with Llama Stack
|
||||
2. Query the agent's sessions, turns, and steps
|
||||
3. Evaluate the results.
|
||||
|
||||
##### Building a Search Agent
|
||||
```python
|
||||
import datasets
|
||||
from llama_stack_client.lib.agents.agent import Agent
|
||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
||||
|
||||
ds = datasets.load_dataset(path="llamastack/mmmu", name="Agriculture", split="dev")
|
||||
ds = ds.select_columns(["chat_completion_input", "input_query", "expected_answer"])
|
||||
eval_rows = ds.to_pandas().to_dict(orient="records")
|
||||
```
|
||||
|
||||
- Next, we will run evaluation on an model candidate, we will need to:
|
||||
- Define a system prompt
|
||||
- Define an EvalCandidate
|
||||
- Run evaluate on the dataset
|
||||
|
||||
```python
|
||||
SYSTEM_PROMPT_TEMPLATE = """
|
||||
You are an expert in Agriculture whose job is to answer questions from the user using images.
|
||||
First, reason about the correct answer.
|
||||
Then write the answer in the following format where X is exactly one of A,B,C,D:
|
||||
Answer: X
|
||||
Make sure X is one of A,B,C,D.
|
||||
If you are uncertain of the correct answer, guess the most likely one.
|
||||
"""
|
||||
|
||||
system_message = {
|
||||
"role": "system",
|
||||
"content": SYSTEM_PROMPT_TEMPLATE,
|
||||
}
|
||||
|
||||
client.benchmarks.register(
|
||||
benchmark_id="meta-reference::mmmu",
|
||||
dataset_id=f"mmmu-{subset}-{split}",
|
||||
scoring_functions=["basic::regex_parser_multiple_choice_answer"],
|
||||
agent = Agent(
|
||||
client,
|
||||
model="meta-llama/Llama-3.3-70B-Instruct",
|
||||
instructions="You are a helpful assistant. Use search tool to answer the questions. ",
|
||||
tools=["builtin::websearch"],
|
||||
)
|
||||
user_prompts = [
|
||||
"Which teams played in the NBA western conference finals of 2024. Search the web for the answer.",
|
||||
"In which episode and season of South Park does Bill Cosby (BSM-471) first appear? Give me the number and title. Search the web for the answer.",
|
||||
"What is the British-American kickboxer Andrew Tate's kickboxing name? Search the web for the answer.",
|
||||
]
|
||||
|
||||
response = client.eval.evaluate_rows(
|
||||
benchmark_id="meta-reference::mmmu",
|
||||
input_rows=eval_rows,
|
||||
scoring_functions=["basic::regex_parser_multiple_choice_answer"],
|
||||
task_config={
|
||||
"type": "benchmark",
|
||||
"eval_candidate": {
|
||||
"type": "model",
|
||||
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
|
||||
"sampling_params": {
|
||||
"strategy": {
|
||||
"type": "greedy",
|
||||
},
|
||||
"max_tokens": 4096,
|
||||
"repeat_penalty": 1.0,
|
||||
},
|
||||
"system_message": system_message,
|
||||
},
|
||||
},
|
||||
)
|
||||
```
|
||||
session_id = agent.create_session("test-session")
|
||||
|
||||
#### 1.2. Running SimpleQA
|
||||
- We will use a pre-processed SimpleQA dataset from [llamastack/evals](https://huggingface.co/datasets/llamastack/evals/viewer/evals__simpleqa) which is obtained by transforming the input query into correct format accepted by `inference/chat-completion` API.
|
||||
- Since we will be using this same dataset in our next example for Agentic evaluation, we will register it using the `/datasets` API, and interact with it through `/datasetio` API.
|
||||
for prompt in user_prompts:
|
||||
response = agent.create_turn(
|
||||
messages=[
|
||||
{
|
||||
"role": "user",
|
||||
"content": prompt,
|
||||
}
|
||||
],
|
||||
session_id=session_id,
|
||||
)
|
||||
|
||||
```python
|
||||
simpleqa_dataset_id = "huggingface::simpleqa"
|
||||
|
||||
_ = client.datasets.register(
|
||||
dataset_id=simpleqa_dataset_id,
|
||||
provider_id="huggingface",
|
||||
url={"uri": "https://huggingface.co/datasets/llamastack/evals"},
|
||||
metadata={
|
||||
"path": "llamastack/evals",
|
||||
"name": "evals__simpleqa",
|
||||
"split": "train",
|
||||
},
|
||||
dataset_schema={
|
||||
"input_query": {"type": "string"},
|
||||
"expected_answer": {"type": "string"},
|
||||
"chat_completion_input": {"type": "chat_completion_input"},
|
||||
},
|
||||
)
|
||||
|
||||
eval_rows = client.datasetio.get_rows_paginated(
|
||||
dataset_id=simpleqa_dataset_id,
|
||||
rows_in_page=5,
|
||||
)
|
||||
```
|
||||
|
||||
```python
|
||||
client.benchmarks.register(
|
||||
benchmark_id="meta-reference::simpleqa",
|
||||
dataset_id=simpleqa_dataset_id,
|
||||
scoring_functions=["llm-as-judge::405b-simpleqa"],
|
||||
)
|
||||
|
||||
response = client.eval.evaluate_rows(
|
||||
benchmark_id="meta-reference::simpleqa",
|
||||
input_rows=eval_rows.rows,
|
||||
scoring_functions=["llm-as-judge::405b-simpleqa"],
|
||||
task_config={
|
||||
"type": "benchmark",
|
||||
"eval_candidate": {
|
||||
"type": "model",
|
||||
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
|
||||
"sampling_params": {
|
||||
"strategy": {
|
||||
"type": "greedy",
|
||||
},
|
||||
"max_tokens": 4096,
|
||||
"repeat_penalty": 1.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
)
|
||||
for log in EventLogger().log(response):
|
||||
log.print()
|
||||
```
|
||||
|
||||
|
||||
### 2. Agentic Evaluation
|
||||
- In this example, we will demonstrate how to evaluate an agent candidate served by Llama Stack via the `/agent` API.
- We will continue to use the SimpleQA dataset we used in the previous example.
- Instead of running the evaluation on a model, we will run it on a Search Agent with access to a search tool. We will define our agent evaluation candidate through `AgentConfig`.
|
||||
##### Query Agent Execution Steps
|
||||
|
||||
Now, let's look deeper into the agent's execution steps and see how well our agent performs.
|
||||
```python
|
||||
# query the agent's session
|
||||
from rich.pretty import pprint
|
||||
|
||||
session_response = client.agents.session.retrieve(
|
||||
session_id=session_id,
|
||||
agent_id=agent.agent_id,
|
||||
)
|
||||
|
||||
pprint(session_response)
|
||||
```
|
||||
|
||||
As a sanity check, we will first verify that every user prompt is followed by a tool call to `brave_search`.
|
||||
```python
|
||||
num_tool_call = 0
|
||||
for turn in session_response.turns:
|
||||
for step in turn.steps:
|
||||
if (
|
||||
step.step_type == "tool_execution"
|
||||
and step.tool_calls[0].tool_name == "brave_search"
|
||||
):
|
||||
num_tool_call += 1
|
||||
|
||||
print(
|
||||
f"{num_tool_call}/{len(session_response.turns)} user prompts are followed by a tool call to `brave_search`"
|
||||
)
|
||||
```
|
||||
|
||||
##### Evaluate Agent Responses
|
||||
Now, we want to evaluate the agent's responses to the user prompts.
|
||||
|
||||
1. First, we will process the agent's execution history into a list of rows that can be used for evaluation.
|
||||
2. Next, we will label the rows with the expected answer.
|
||||
3. Finally, we will use the `/scoring` API to score the agent's responses.
|
||||
|
||||
```python
eval_rows = []

expected_answers = [
    "Dallas Mavericks and the Minnesota Timberwolves",
    "Season 4, Episode 12",
    "King Cobra",
]

for i, turn in enumerate(session_response.turns):
    eval_rows.append(
        {
            "input_query": turn.input_messages[0].content,
            "generated_answer": turn.output_message.content,
            "expected_answer": expected_answers[i],
        }
    )

pprint(eval_rows)

scoring_params = {
    "basic::subset_of": None,
}
scoring_response = client.scoring.score(
    input_rows=eval_rows, scoring_functions=scoring_params
)
pprint(scoring_response)

agent_config = {
    "model": "meta-llama/Llama-3.1-405B-Instruct",
    "instructions": "You are a helpful assistant",
    "sampling_params": {
        "strategy": {
            "type": "greedy",
        },
    },
    "tools": [
        {
            "type": "brave_search",
            "engine": "tavily",
            "api_key": userdata.get("TAVILY_SEARCH_API_KEY"),
        }
    ],
    "tool_choice": "auto",
    "tool_prompt_format": "json",
    "input_shields": [],
    "output_shields": [],
    "enable_session_persistence": False,
}

response = client.eval.evaluate_rows(
    benchmark_id="meta-reference::simpleqa",
    input_rows=eval_rows.rows,
    scoring_functions=["llm-as-judge::405b-simpleqa"],
    task_config={
        "type": "benchmark",
        "eval_candidate": {
            "type": "agent",
            "config": agent_config,
        },
    },
)
```
|
||||
|
|
|
|||
|
|
@ -1,30 +0,0 @@
|
|||
## Testing & Evaluation
|
||||
|
||||
Llama Stack provides built-in tools for evaluating your applications:
|
||||
|
||||
1. **Benchmarking**: Test against standard datasets
|
||||
2. **Application Evaluation**: Score your application's outputs
|
||||
3. **Custom Metrics**: Define your own evaluation criteria
|
||||
|
||||
Here's how to set up basic evaluation:
|
||||
|
||||
```python
|
||||
# Create an evaluation task
|
||||
response = client.benchmarks.register(
|
||||
benchmark_id="my_eval",
|
||||
dataset_id="my_dataset",
|
||||
scoring_functions=["accuracy", "relevance"],
|
||||
)
|
||||
|
||||
# Run evaluation
|
||||
job = client.eval.run_eval(
|
||||
benchmark_id="my_eval",
|
||||
task_config={
|
||||
"type": "app",
|
||||
"eval_candidate": {"type": "agent", "config": agent_config},
|
||||
},
|
||||
)
|
||||
|
||||
# Get results
|
||||
result = client.eval.job_result(benchmark_id="my_eval", job_id=job.job_id)
|
||||
```
|
||||
|
|
@ -8,22 +8,24 @@ The best way to get started is to look at this notebook which walks through the
|
|||
|
||||
Here are some key topics that will help you build effective agents:
|
||||
|
||||
- **[Agent Execution Loop](agent_execution_loop)**
|
||||
- **[RAG](rag)**
|
||||
- **[Safety](safety)**
|
||||
- **[Tools](tools)**
|
||||
- **[Telemetry](telemetry)**
|
||||
- **[Evals](evals)**
|
||||
|
||||
- **[Agent](agent)**: Understand the components and design patterns of the Llama Stack agent framework.
|
||||
- **[Agent Execution Loop](agent_execution_loop)**: Understand how agents process information, make decisions, and execute actions in a continuous loop.
|
||||
- **[RAG (Retrieval-Augmented Generation)](rag)**: Learn how to enhance your agents with external knowledge through retrieval mechanisms.
|
||||
- **[Tools](tools)**: Extend your agents' capabilities by integrating with external tools and APIs.
|
||||
- **[Evals](evals)**: Evaluate your agents' effectiveness and identify areas for improvement.
|
||||
- **[Telemetry](telemetry)**: Monitor and analyze your agents' performance and behavior.
|
||||
- **[Safety](safety)**: Implement guardrails and safety measures to ensure responsible AI behavior.
|
||||
|
||||
```{toctree}
|
||||
:hidden:
|
||||
:maxdepth: 1
|
||||
|
||||
agent
|
||||
agent_execution_loop
|
||||
rag
|
||||
safety
|
||||
tools
|
||||
telemetry
|
||||
evals
|
||||
advanced_agent_patterns
|
||||
safety
|
||||
```
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
## Using "Memory" or Retrieval Augmented Generation (RAG)
|
||||
## Using Retrieval Augmented Generation (RAG)
|
||||
|
||||
Memory enables your applications to reference and recall information from previous interactions or external documents.
|
||||
RAG enables your applications to reference and recall information from previous interactions or external documents.
|
||||
|
||||
Llama Stack organizes the memory APIs into three layers:
|
||||
Llama Stack organizes the APIs that enable RAG into three layers:
|
||||
- the lowermost APIs deal with raw storage and retrieval. These include Vector IO, KeyValue IO (coming soon) and Relational IO (also coming soon.)
|
||||
- next is the "Rag Tool", a first-class tool as part of the Tools API that allows you to ingest documents (from URLs, files, etc) with various chunking strategies and query them smartly.
|
||||
- finally, it all comes together with the top-level "Agents" API that allows you to create agents that can use the tools to answer questions, perform tasks, and more.
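Putting the three layers together, here is a compact, hedged sketch that maps each layer to one API call (identifiers such as `my_documents` and the embedding model are illustrative; the full setup is shown in the sections below):

```python
from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.types import Document

client = LlamaStackClient(base_url="http://localhost:8321")

# Layer 1 (Vector IO): raw storage -- register a vector database
client.vector_dbs.register(
    vector_db_id="my_documents",         # illustrative id
    embedding_model="all-MiniLM-L6-v2",  # assumed embedding model
)

# Layer 2 (RAG tool): ingest documents with chunking, then query them
client.tool_runtime.rag_tool.insert(
    documents=[
        Document(
            document_id="doc-1",
            content="https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/memory_optimizations.rst",
            mime_type="text/plain",
            metadata={},
        )
    ],
    vector_db_id="my_documents",
    chunk_size_in_tokens=512,
)
results = client.tool_runtime.rag_tool.query(
    vector_db_ids=["my_documents"],
    content="How do I optimize memory in PyTorch?",
)

# Layer 3 (Agents): an agent that calls the RAG tool on its own
agent = Agent(
    client,
    model="meta-llama/Llama-3.3-70B-Instruct",
    instructions="You are a helpful assistant",
    tools=[{"name": "builtin::rag/knowledge_search", "args": {"vector_db_ids": ["my_documents"]}}],
)
```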
|
||||
|
|
@ -20,6 +20,11 @@ We may add more storage types like Graph IO in the future.
|
|||
Here's how to set up a vector database for RAG:
|
||||
|
||||
```python
|
||||
# Create http client
|
||||
from llama_stack_client import LlamaStackClient
|
||||
|
||||
client = LlamaStackClient(base_url=f"http://localhost:{os.environ['LLAMA_STACK_PORT']}")
|
||||
|
||||
# Register a vector db
|
||||
vector_db_id = "my_documents"
|
||||
response = client.vector_dbs.register(
|
||||
|
|
@ -81,15 +86,14 @@ results = client.tool_runtime.rag_tool.query(
|
|||
One of the most powerful patterns is combining agents with RAG capabilities. Here's a complete example:
|
||||
|
||||
```python
|
||||
from llama_stack_client.types.agent_create_params import AgentConfig
|
||||
from llama_stack_client.lib.agents.agent import Agent
|
||||
|
||||
# Configure agent with memory
|
||||
agent_config = AgentConfig(
|
||||
model="meta-llama/Llama-3.2-3B-Instruct",
|
||||
# Create agent with memory
|
||||
agent = Agent(
|
||||
client,
|
||||
model="meta-llama/Llama-3.3-70B-Instruct",
|
||||
instructions="You are a helpful assistant",
|
||||
enable_session_persistence=False,
|
||||
toolgroups=[
|
||||
tools=[
|
||||
{
|
||||
"name": "builtin::rag/knowledge_search",
|
||||
"args": {
|
||||
|
|
@ -98,10 +102,21 @@ agent_config = AgentConfig(
|
|||
}
|
||||
],
|
||||
)
|
||||
|
||||
agent = Agent(client, agent_config)
|
||||
session_id = agent.create_session("rag_session")
|
||||
|
||||
|
||||
# Ask questions about documents in the vector db, and the agent will query the db to answer the question.
|
||||
response = agent.create_turn(
|
||||
messages=[{"role": "user", "content": "How to optimize memory in PyTorch?"}],
|
||||
session_id=session_id,
|
||||
)
|
||||
```
|
||||
|
||||
> **NOTE:** the `instructions` field in the `AgentConfig` can be used to guide the agent's behavior. It is important to experiment with different instructions to see what works best for your use case.
|
||||
|
||||
|
||||
You can also pass documents along with the user's message and ask questions about them.
|
||||
```python
|
||||
# Initial document ingestion
|
||||
response = agent.create_turn(
|
||||
messages=[
|
||||
|
|
@ -109,7 +124,7 @@ response = agent.create_turn(
|
|||
],
|
||||
documents=[
|
||||
{
|
||||
"content": "https://raw.githubusercontent.com/example/doc.rst",
|
||||
"content": "https://raw.githubusercontent.com/pytorch/torchtune/main/docs/source/tutorials/memory_optimizations.rst",
|
||||
"mime_type": "text/plain",
|
||||
}
|
||||
],
|
||||
|
|
@ -123,6 +138,14 @@ response = agent.create_turn(
|
|||
)
|
||||
```
|
||||
|
||||
You can print the response as shown below.
|
||||
```python
|
||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
||||
|
||||
for log in EventLogger().log(response):
|
||||
log.print()
|
||||
```
|
||||
|
||||
### Unregistering Vector DBs
|
||||
|
||||
If you need to clean up and unregister vector databases, you can do so as follows:
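The concrete snippet is not visible in this hunk; as a hedged sketch, assuming the `vector_dbs` resource exposes `list()` and `unregister()` like other registrable resources:

```python
# Unregister every vector database currently known to the stack
for vector_db in client.vector_dbs.list():
    print(f"Unregistering vector database: {vector_db.identifier}")
    client.vector_dbs.unregister(vector_db_id=vector_db.identifier)
```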
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ An example of this would be a "db_access" tool group that contains tools for int
|
|||
|
||||
Tools are treated like any other resource in Llama Stack, such as models. You can register them, have providers for them, etc.
|
||||
|
||||
When instatiating an agent, you can provide it a list of tool groups that it has access to. Agent gets the corresponding tool definitions for the specified tool groups and passes them along to the model.
|
||||
When instantiating an agent, you can provide it a list of tool groups that it has access to. Agent gets the corresponding tool definitions for the specified tool groups and passes them along to the model.
|
||||
|
||||
Refer to the [Building AI Applications](https://github.com/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb) notebook for more examples on how to use tools.
|
||||
|
||||
|
|
@ -60,7 +60,7 @@ Features:
|
|||
- Disabled dangerous system operations
|
||||
- Configurable execution timeouts
|
||||
|
||||
> ⚠️ Important: The code interpreter tool can operate in a controlled enviroment locally or on Podman containers. To ensure proper functionality in containerised environments:
|
||||
> ⚠️ Important: The code interpreter tool can operate in a controlled environment locally or on Podman containers. To ensure proper functionality in containerized environments:
|
||||
> - The container requires privileged access (e.g., --privileged).
|
||||
> - Users without sufficient permissions may encounter permission errors. (`bwrap: Can't mount devpts on /newroot/dev/pts: Permission denied`)
|
||||
> - 🔒 Security Warning: Privileged mode grants elevated access and bypasses security restrictions. Use only in local, isolated, or controlled environments.
|
||||
|
|
@ -83,15 +83,15 @@ result = client.tool_runtime.invoke_tool(
|
|||
)
|
||||
```
|
||||
|
||||
#### Memory
|
||||
#### RAG
|
||||
|
||||
The Memory tool enables retrieval of context from various types of memory banks (vector, key-value, keyword, and graph).
|
||||
The RAG tool enables retrieval of context from various types of memory banks (vector, key-value, keyword, and graph).
|
||||
|
||||
```python
|
||||
# Register Memory tool group
|
||||
client.toolgroups.register(
|
||||
toolgroup_id="builtin::memory",
|
||||
provider_id="memory",
|
||||
toolgroup_id="builtin::rag",
|
||||
provider_id="faiss",
|
||||
args={"max_chunks": 5, "max_tokens_in_context": 4096},
|
||||
)
|
||||
```
|
||||
|
|
@ -102,7 +102,7 @@ Features:
|
|||
- Context retrieval with token limits
|
||||
|
||||
|
||||
> **Note:** By default, llama stack run.yaml defines toolgroups for web search, code interpreter and memory, that are provided by tavily-search, code-interpreter and memory providers.
|
||||
> **Note:** By default, llama stack run.yaml defines toolgroups for web search, code interpreter and rag, that are provided by tavily-search, code-interpreter and rag providers.
|
||||
|
||||
## Model Context Protocol (MCP) Tools
|
||||
|
||||
|
|
@ -125,50 +125,35 @@ MCP tools require:
|
|||
- Tools are discovered dynamically from the endpoint
|
||||
|
||||
|
||||
## Tools provided by the client
|
||||
## Adding Custom Tools
|
||||
|
||||
These tools are registered along with the agent config and are specific to the agent for which they are registered. The main difference between these tools and the tools provided by the built-in providers is that the execution of these tools is handled by the client and the agent transfers the tool call to the client and waits for the result from the client.
|
||||
When you want to use tools other than the built-in tools, you can implement a python function and decorate it with `@client_tool`.
|
||||
|
||||
To define a custom tool, you need to use the `@client_tool` decorator.
|
||||
```python
|
||||
from llama_stack_client.lib.agents.client_tool import client_tool
|
||||
|
||||
|
||||
# Example tool definition
|
||||
@client_tool
|
||||
def my_tool(input: int) -> int:
|
||||
"""
|
||||
Runs my awesome tool.
|
||||
|
||||
:param input: some int parameter
|
||||
"""
|
||||
return input * 2
|
||||
```
|
||||
> **NOTE:** We employ python docstrings to describe the tool and the parameters. It is important to document the tool and the parameters so that the model can use the tool correctly. It is recommended to experiment with different docstrings to see how they affect the model's behavior.
|
||||
|
||||
Once defined, simply pass the tool to the agent config. `Agent` will take care of the rest (calling the model with the tool definition, executing the tool, and returning the result to the model for the next iteration).
|
||||
```python
|
||||
# Example agent config with client provided tools
|
||||
config = AgentConfig(
|
||||
toolgroups=[
|
||||
"builtin::websearch",
|
||||
],
|
||||
client_tools=[ToolDef(name="client_tool", description="Client provided tool")],
|
||||
)
|
||||
agent = Agent(client, ..., tools=[my_tool])
|
||||
```
|
||||
|
||||
Refer to [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/blob/main/examples/agents/e2e_loop_with_client_tools.py) for an example of how to use client provided tools.
|
||||
|
||||
## Tool Structure
|
||||
|
||||
Each tool has the following components:
|
||||
|
||||
- `name`: Unique identifier for the tool
|
||||
- `description`: Human-readable description of the tool's functionality
|
||||
- `parameters`: List of parameters the tool accepts
|
||||
- `name`: Parameter name
|
||||
- `parameter_type`: Data type (string, number, etc.)
|
||||
- `description`: Parameter description
|
||||
- `required`: Whether the parameter is required (default: true)
|
||||
- `default`: Default value if any
|
||||
|
||||
Example tool definition:
|
||||
```python
|
||||
{
|
||||
"name": "web_search",
|
||||
"description": "Search the web for information",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "query",
|
||||
"parameter_type": "string",
|
||||
"description": "The query to search for",
|
||||
"required": True,
|
||||
}
|
||||
],
|
||||
}
|
||||
```
|
||||
|
||||
## Tool Invocation
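The body of this section is elided in this hunk; as a hedged sketch, direct invocation goes through the tool runtime (the `web_search` name and `query` argument mirror the example definition above, and the arguments are illustrative):

```python
# Invoke a tool directly, outside of an agent loop
result = client.tool_runtime.invoke_tool(
    tool_name="web_search",
    kwargs={"query": "Latest Llama Stack release"},
)
print(result.content)
```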
|
||||
|
||||
|
|
@ -201,10 +186,10 @@ group_tools = client.tools.list_tools(toolgroup_id="search_tools")
|
|||
|
||||
```python
|
||||
from llama_stack_client.lib.agents.agent import Agent
|
||||
from llama_stack_client.types.agent_create_params import AgentConfig
|
||||
|
||||
# Configure the AI agent with necessary parameters
|
||||
agent_config = AgentConfig(
|
||||
# Instantiate the AI agent with the given configuration
|
||||
agent = Agent(
|
||||
client,
|
||||
name="code-interpreter",
|
||||
description="A code interpreter agent for executing Python code snippets",
|
||||
instructions="""
|
||||
|
|
@ -212,14 +197,10 @@ agent_config = AgentConfig(
|
|||
Always show the generated code, never generate your own code, and never anticipate results.
|
||||
""",
|
||||
model="meta-llama/Llama-3.2-3B-Instruct",
|
||||
toolgroups=["builtin::code_interpreter"],
|
||||
tools=["builtin::code_interpreter"],
|
||||
max_infer_iters=5,
|
||||
enable_session_persistence=False,
|
||||
)
|
||||
|
||||
# Instantiate the AI agent with the given configuration
|
||||
agent = Agent(client, agent_config)
|
||||
|
||||
# Start a session
|
||||
session_id = agent.create_session("tool_session")
|
||||
|
||||
|
|
|
|||
|
|
@ -24,17 +24,8 @@ The Evaluation APIs are associated with a set of Resources as shown in the follo
|
|||
- Associated with `Benchmark` resource.
|
||||
|
||||
|
||||
Use the following decision tree to decide how to use LlamaStack Evaluation flow.
|
||||

|
||||
|
||||
|
||||
```{admonition} Note on Benchmark vs. Application Evaluation
|
||||
:class: tip
|
||||
- **Benchmark Evaluation** is a well-defined eval-task consisting of `dataset` and `scoring_function`. The generation (inference or agent) will be done as part of evaluation.
|
||||
- **Application Evaluation** assumes users already have app inputs & generated outputs. Evaluation will purely focus on scoring the generated outputs via scoring functions (e.g. LLM-as-judge).
|
||||
```
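To make the distinction concrete, here is a hedged sketch using the `eval` and `scoring` entry points that appear elsewhere in this document (the benchmark id, model, and rows are illustrative):

```python
# Benchmark evaluation: the stack runs generation (model or agent) and scoring
job = client.eval.run_eval(
    benchmark_id="meta-reference::simpleqa",
    task_config={
        "type": "benchmark",
        "eval_candidate": {
            "type": "model",
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "sampling_params": {"strategy": {"type": "greedy"}},
        },
    },
)

# Application evaluation: you already have generated outputs and only score them
scoring_response = client.scoring.score(
    input_rows=[
        {
            "input_query": "What is the capital of France?",
            "generated_answer": "Paris",
            "expected_answer": "Paris",
        }
    ],
    scoring_functions={"basic::subset_of": None},
)
```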
|
||||
|
||||
## What's Next?
|
||||
|
||||
- Check out our Colab notebook on working examples with evaluations [here](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing).
|
||||
- Check out our Colab notebook on working examples with running benchmark evaluations [here](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb#scrollTo=mxLCsP4MvFqP).
|
||||
- Check out our [Building Applications - Evaluation](../building_applications/evals.md) guide for more details on how to use the Evaluation APIs to evaluate your applications.
|
||||
- Check out our [Evaluation Reference](../references/evals_reference/index.md) for more details on the APIs.
|
||||
|
|
|
|||
|
|
@ -1,5 +1,13 @@
|
|||
# Core Concepts
|
||||
|
||||
|
||||
```{toctree}
|
||||
:maxdepth: 1
|
||||
:hidden:
|
||||
|
||||
evaluation_concepts
|
||||
```
|
||||
|
||||
Given Llama Stack's service-oriented philosophy, a few concepts and workflows arise which may not feel completely natural in the LLM landscape, especially if you are coming with a background in other frameworks.
|
||||
|
||||
|
||||
|
|
@ -26,7 +34,7 @@ We are working on adding a few more APIs to complete the application lifecycle.
|
|||
|
||||
The goal of Llama Stack is to build an ecosystem where users can easily swap out different implementations for the same API. Examples for these include:
|
||||
- LLM inference providers (e.g., Fireworks, Together, AWS Bedrock, Groq, Cerebras, SambaNova, vLLM, etc.),
|
||||
- Vector databases (e.g., ChromaDB, Weaviate, Qdrant, FAISS, PGVector, etc.),
|
||||
- Vector databases (e.g., ChromaDB, Weaviate, Qdrant, Milvus, FAISS, PGVector, etc.),
|
||||
- Safety providers (e.g., Meta's Llama Guard, AWS Bedrock Guardrails, etc.)
|
||||
|
||||
Providers come in two flavors:
|
||||
|
|
|
|||
|
|
@ -13,13 +13,19 @@
|
|||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||
|
||||
from docutils import nodes
|
||||
import tomli # Import tomli for TOML parsing
|
||||
from pathlib import Path
|
||||
import requests
|
||||
import json
|
||||
|
||||
# Read version from pyproject.toml
|
||||
with Path(__file__).parent.parent.parent.joinpath("pyproject.toml").open("rb") as f:
|
||||
pyproject = tomli.load(f)
|
||||
llama_stack_version = pyproject["project"]["version"]
|
||||
pypi_url = "https://pypi.org/pypi/llama-stack/json"
|
||||
version_tag = json.loads(requests.get(pypi_url).text)["info"]["version"]
|
||||
print(f"{version_tag=}")
|
||||
|
||||
# generate the full link including text and url here
|
||||
llama_stack_version_url = f"https://github.com/meta-llama/llama-stack/releases/tag/v{version_tag}"
|
||||
llama_stack_version_link = f"<a href='{llama_stack_version_url}'>release notes</a>"
|
||||
|
||||
project = "llama-stack"
|
||||
copyright = "2025, Meta"
|
||||
|
|
@ -73,7 +79,8 @@ myst_enable_extensions = [
|
|||
|
||||
myst_substitutions = {
|
||||
"docker_hub": "https://hub.docker.com/repository/docker/llamastack",
|
||||
"llama_stack_version": llama_stack_version,
|
||||
"llama_stack_version": version_tag,
|
||||
"llama_stack_version_link": llama_stack_version_link,
|
||||
}
|
||||
|
||||
suppress_warnings = ['myst.header']
|
||||
|
|
|
|||
|
|
@ -17,25 +17,31 @@ Here are some example PRs to help you get started:
|
|||
|
||||
## Testing the Provider
|
||||
|
||||
Before running tests, you must have the required dependencies installed. This depends on the providers or distributions you are testing. For example, if you are testing the `together` distribution, you should install dependencies via `llama stack build --template together`.
|
||||
|
||||
### 1. Integration Testing
|
||||
- Create integration tests that use real provider instances and configurations
|
||||
- For remote services, test actual API interactions
|
||||
- Avoid mocking at the provider level since adapter layers tend to be thin
|
||||
- Reference examples in {repopath}`tests/client-sdk`
|
||||
|
||||
### 2. Unit Testing (Optional)
|
||||
- Add unit tests for provider-specific functionality
|
||||
- See examples in {repopath}`llama_stack/providers/tests/inference/test_text_inference.py`
|
||||
Integration tests are located in {repopath}`tests/integration`. These tests use the python client-SDK APIs (from the `llama_stack_client` package) to test functionality. Since these tests use client APIs, they can be run either by pointing to an instance of the Llama Stack server or "inline" by using `LlamaStackAsLibraryClient`.
|
||||
|
||||
Consult {repopath}`tests/integration/README.md` for more details on how to run the tests.
|
||||
|
||||
Note that each provider's `sample_run_config()` method (in the configuration class for that provider)
|
||||
typically references some environment variables for specifying API keys and the like. You can set these in the environment or pass these via the `--env` flag to the test command.
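For example, an illustrative invocation (the exact flags and test selection are described in the README referenced above):

```bash
# Run the inference integration tests, passing the API key through the
# --env flag mentioned above (illustrative; consult tests/integration/README.md)
pytest -s -v tests/integration/inference \
  --env TOGETHER_API_KEY=$TOGETHER_API_KEY
```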
|
||||
|
||||
|
||||
### 2. Unit Testing
|
||||
|
||||
Unit tests are located in {repopath}`tests/unit`. Provider-specific unit tests are located in {repopath}`tests/unit/providers`. These tests are all run automatically as part of the CI process.
|
||||
|
||||
|
||||
### 3. Additional end-to-end testing
|
||||
|
||||
### 3. End-to-End Testing
|
||||
1. Start a Llama Stack server with your new provider
|
||||
2. Test using client requests
|
||||
3. Verify compatibility with existing client scripts in the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repository
|
||||
4. Document which scripts are compatible with your provider
|
||||
2. Verify compatibility with existing client scripts in the [llama-stack-apps](https://github.com/meta-llama/llama-stack-apps/tree/main) repository
|
||||
3. Document which scripts are compatible with your provider
|
||||
|
||||
## Submitting Your PR
|
||||
|
||||
1. Ensure all tests pass
|
||||
2. Include a comprehensive test plan in your PR summary
|
||||
3. Document any known limitations or considerations
|
||||
4. Submit your pull request for review
|
||||
|
|
|
|||
|
|
@ -4,6 +4,35 @@
|
|||
This guide will walk you through the steps to get started with building a Llama Stack distribution from scratch with your choice of API providers.
|
||||
|
||||
|
||||
### Setting your log level
|
||||
|
||||
To specify the logging level, set the `LLAMA_STACK_LOGGING` environment variable using the following format:
|
||||
|
||||
`LLAMA_STACK_LOGGING=server=debug;core=info`
|
||||
|
||||
where each category is one of the following:
|
||||
|
||||
- all
|
||||
- core
|
||||
- server
|
||||
- router
|
||||
- inference
|
||||
- agents
|
||||
- safety
|
||||
- eval
|
||||
- tools
|
||||
- client
|
||||
|
||||
and each category can be set to any of the following log levels:
|
||||
|
||||
- debug
|
||||
- info
|
||||
- warning
|
||||
- error
|
||||
- critical
|
||||
|
||||
The default global log level is `info`. `all` sets the log level for all components.
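For example, to start a server with verbose server logs but quieter inference logs (the run config path is illustrative):

```bash
# Illustrative: adjust categories/levels to taste; the config path is an assumption
LLAMA_STACK_LOGGING="server=debug;inference=warning" \
  llama stack run ~/.llama/distributions/my-stack/my-stack-run.yaml --port 8321
```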
|
||||
|
||||
### Llama Stack Build
|
||||
|
||||
In order to build your own distribution, we recommend you clone the `llama-stack` repository.
|
||||
|
|
@ -30,7 +59,7 @@ Build a Llama stack container
|
|||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--config CONFIG Path to a config file to use for the build. You can find example configs in llama_stack/distribution/**/build.yaml.
|
||||
--config CONFIG Path to a config file to use for the build. You can find example configs in llama_stack/distributions/**/build.yaml.
|
||||
If this argument is not provided, you will be prompted to enter information interactively
|
||||
--template TEMPLATE Name of the example template config to use for build. You may use `llama stack build --list-templates` to check out the available templates
|
||||
--list-templates Show the available templates for building a Llama Stack distribution
|
||||
|
|
@ -106,7 +135,7 @@ It would be best to start with a template and understand the structure of the co
|
|||
llama stack build
|
||||
|
||||
> Enter a name for your Llama Stack (e.g. my-local-stack): my-stack
|
||||
> Enter the image type you want your Llama Stack to be built as (container or conda): conda
|
||||
> Enter the image type you want your Llama Stack to be built as (container or conda or venv): conda
|
||||
|
||||
Llama Stack is composed of several APIs working together. Let's select
|
||||
the provider types (implementations) you want to use for these APIs.
|
||||
|
|
@ -187,14 +216,14 @@ usage: llama stack run [-h] [--port PORT] [--image-name IMAGE_NAME] [--disable-i
|
|||
[--tls-certfile TLS_CERTFILE] [--image-type {conda,container,venv}]
|
||||
config
|
||||
|
||||
start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.
|
||||
Start the server for a Llama Stack Distribution. You should have already built (or downloaded) and configured the distribution.
|
||||
|
||||
positional arguments:
|
||||
config Path to config file to use for the run
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--port PORT Port to run the server on. Defaults to 8321
|
||||
--port PORT Port to run the server on. It can also be passed via the env var LLAMA_STACK_PORT. Defaults to 8321
|
||||
--image-name IMAGE_NAME
|
||||
Name of the image to run. Defaults to the current conda environment
|
||||
--disable-ipv6 Disable IPv6 support
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ The `llamastack/distribution-fireworks` distribution consists of the following p
|
|||
| safety | `inline::llama-guard` |
|
||||
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
|
||||
| telemetry | `inline::meta-reference` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `remote::wolfram-alpha`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
|
||||
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -42,12 +42,31 @@ The following environment variables can be configured:
|
|||
|
||||
## Prerequisite: Downloading Models
|
||||
|
||||
Please make sure you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/references/llama_cli_reference/download_models.html) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints.
|
||||
Please use `llama model list --downloaded` to check that you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/references/llama_cli_reference/download_models.html) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints.
|
||||
|
||||
```
|
||||
$ ls ~/.llama/checkpoints
|
||||
Llama3.1-8B Llama3.2-11B-Vision-Instruct Llama3.2-1B-Instruct Llama3.2-90B-Vision-Instruct Llama-Guard-3-8B
|
||||
Llama3.1-8B-Instruct Llama3.2-1B Llama3.2-3B-Instruct Llama-Guard-3-1B Prompt-Guard-86M
|
||||
$ llama model list --downloaded
|
||||
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┓
|
||||
┃ Model ┃ Size ┃ Modified Time ┃
|
||||
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━┩
|
||||
│ Llama3.2-1B-Instruct:int4-qlora-eo8 │ 1.53 GB │ 2025-02-26 11:22:28 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-1B │ 2.31 GB │ 2025-02-18 21:48:52 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Prompt-Guard-86M │ 0.02 GB │ 2025-02-26 11:29:28 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-3B-Instruct:int4-spinquant-eo8 │ 3.69 GB │ 2025-02-26 11:37:41 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-3B │ 5.99 GB │ 2025-02-18 21:51:26 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.1-8B │ 14.97 GB │ 2025-02-16 10:36:37 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-1B-Instruct:int4-spinquant-eo8 │ 1.51 GB │ 2025-02-26 11:35:02 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama-Guard-3-1B │ 2.80 GB │ 2025-02-26 11:20:46 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama-Guard-3-1B:int4 │ 0.43 GB │ 2025-02-26 11:33:33 │
|
||||
└─────────────────────────────────────────┴──────────┴─────────────────────┘
|
||||
```
|
||||
|
||||
## Running the Distribution
|
||||
|
|
|
|||
|
|
@ -42,12 +42,31 @@ The following environment variables can be configured:
|
|||
|
||||
## Prerequisite: Downloading Models
|
||||
|
||||
Please make sure you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/references/llama_cli_reference/download_models.html) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints.
|
||||
Please use `llama model list --downloaded` to check that you have llama model checkpoints downloaded in `~/.llama` before proceeding. See [installation guide](https://llama-stack.readthedocs.io/en/latest/references/llama_cli_reference/download_models.html) here to download the models. Run `llama model list` to see the available models to download, and `llama model download` to download the checkpoints.
|
||||
|
||||
```
|
||||
$ ls ~/.llama/checkpoints
|
||||
Llama3.1-8B Llama3.2-11B-Vision-Instruct Llama3.2-1B-Instruct Llama3.2-90B-Vision-Instruct Llama-Guard-3-8B
|
||||
Llama3.1-8B-Instruct Llama3.2-1B Llama3.2-3B-Instruct Llama-Guard-3-1B Prompt-Guard-86M
|
||||
$ llama model list --downloaded
|
||||
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┓
|
||||
┃ Model ┃ Size ┃ Modified Time ┃
|
||||
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━┩
|
||||
│ Llama3.2-1B-Instruct:int4-qlora-eo8 │ 1.53 GB │ 2025-02-26 11:22:28 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-1B │ 2.31 GB │ 2025-02-18 21:48:52 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Prompt-Guard-86M │ 0.02 GB │ 2025-02-26 11:29:28 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-3B-Instruct:int4-spinquant-eo8 │ 3.69 GB │ 2025-02-26 11:37:41 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-3B │ 5.99 GB │ 2025-02-18 21:51:26 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.1-8B │ 14.97 GB │ 2025-02-16 10:36:37 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-1B-Instruct:int4-spinquant-eo8 │ 1.51 GB │ 2025-02-26 11:35:02 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama-Guard-3-1B │ 2.80 GB │ 2025-02-26 11:20:46 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama-Guard-3-1B:int4 │ 0.43 GB │ 2025-02-26 11:33:33 │
|
||||
└─────────────────────────────────────────┴──────────┴─────────────────────┘
|
||||
```
|
||||
|
||||
## Running the Distribution
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ The `llamastack/distribution-ollama` distribution consists of the following prov
|
|||
| safety | `inline::llama-guard` |
|
||||
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
|
||||
| telemetry | `inline::meta-reference` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
|
||||
| vector_io | `inline::sqlite-vec`, `remote::chromadb`, `remote::pgvector` |
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ The `llamastack/distribution-remote-vllm` distribution consists of the following
|
|||
| safety | `inline::llama-guard` |
|
||||
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
|
||||
| telemetry | `inline::meta-reference` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
|
||||
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ The following environment variables can be configured:
|
|||
|
||||
- `LLAMA_STACK_PORT`: Port for the Llama Stack distribution server (default: `5001`)
|
||||
- `INFERENCE_MODEL`: Inference model loaded into the TGI server (default: `meta-llama/Llama-3.2-3B-Instruct`)
|
||||
- `TGI_URL`: URL of the TGI server with the main inference model (default: `http://127.0.0.1:8080}/v1`)
|
||||
- `TGI_URL`: URL of the TGI server with the main inference model (default: `http://127.0.0.1:8080/v1`)
|
||||
- `TGI_SAFETY_URL`: URL of the TGI server with the safety model (default: `http://127.0.0.1:8081/v1`)
|
||||
- `SAFETY_MODEL`: Name of the safety (Llama-Guard) model to use (default: `meta-llama/Llama-Guard-3-1B`)
|
||||
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ The `llamastack/distribution-together` distribution consists of the following pr
|
|||
| safety | `inline::llama-guard` |
|
||||
| scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
|
||||
| telemetry | `inline::meta-reference` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol` |
|
||||
| tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::code-interpreter`, `inline::rag-runtime`, `remote::model-context-protocol`, `remote::wolfram-alpha` |
|
||||
| vector_io | `inline::faiss`, `remote::chromadb`, `remote::pgvector` |
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ The API is **exactly identical** for both clients.
|
|||
:::{dropdown} Starting up the Llama Stack server
|
||||
The Llama Stack server can be configured flexibly so you can mix-and-match various providers for its individual API components -- beyond Inference, these include Vector IO, Agents, Telemetry, Evals, Post Training, etc.
|
||||
|
||||
To get started quickly, we provide various container images for the server component that work with different inference providers out of the box. For this guide, we will use `llamastack/distribution-ollama` as the container image.
|
||||
To get started quickly, we provide various container images for the server component that work with different inference providers out of the box. For this guide, we will use `llamastack/distribution-ollama` as the container image. If you'd like to build your own image or customize the configurations, please check out [this guide](../references/index.md).
|
||||
|
||||
Let's set up some environment variables that we will use in the rest of the guide.
|
||||
```bash
|
||||
|
|
@ -184,7 +184,6 @@ from termcolor import cprint
|
|||
|
||||
from llama_stack_client.lib.agents.agent import Agent
|
||||
from llama_stack_client.lib.agents.event_logger import EventLogger
|
||||
from llama_stack_client.types.agent_create_params import AgentConfig
|
||||
from llama_stack_client.types import Document
|
||||
|
||||
|
||||
|
|
@ -241,13 +240,14 @@ client.tool_runtime.rag_tool.insert(
|
|||
chunk_size_in_tokens=512,
|
||||
)
|
||||
|
||||
agent_config = AgentConfig(
|
||||
rag_agent = Agent(
|
||||
client,
|
||||
model=os.environ["INFERENCE_MODEL"],
|
||||
# Define instructions for the agent ( aka system prompt)
|
||||
instructions="You are a helpful assistant",
|
||||
enable_session_persistence=False,
|
||||
# Define tools available to the agent
|
||||
toolgroups=[
|
||||
tools=[
|
||||
{
|
||||
"name": "builtin::rag/knowledge_search",
|
||||
"args": {
|
||||
|
|
@ -256,12 +256,10 @@ agent_config = AgentConfig(
|
|||
}
|
||||
],
|
||||
)
|
||||
|
||||
rag_agent = Agent(client, agent_config)
|
||||
session_id = rag_agent.create_session("test-session")
|
||||
|
||||
user_prompts = [
|
||||
"What are the top 5 topics that were explained? Only list succinct bullet points.",
|
||||
"How to optimize memory usage in torchtune? use the knowledge_search tool to get information.",
|
||||
]
|
||||
|
||||
# Run the agent loop by calling the `create_turn` method
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
```{admonition} News
|
||||
:class: tip
|
||||
|
||||
Llama Stack {{ llama_stack_version }} is now available! See the [release notes](https://github.com/meta-llama/llama-stack/releases/tag/v{{ llama_stack_version }}) for more details.
|
||||
Llama Stack {{ llama_stack_version }} is now available! See the {{ llama_stack_version_link }} for more details.
|
||||
```
|
||||
|
||||
# Llama Stack
|
||||
|
|
@ -68,6 +68,7 @@ A number of "adapters" are available for some popular Inference and Vector Store
|
|||
| FAISS | Single Node |
|
||||
| SQLite-Vec| Single Node |
|
||||
| Chroma | Hosted and Single Node |
|
||||
| Milvus | Hosted and Single Node |
|
||||
| Postgres (PGVector) | Hosted and Single Node |
|
||||
| Weaviate | Hosted |
|
||||
|
||||
|
|
|
|||
|
|
@ -2,7 +2,7 @@
|
|||
|
||||
The goal of Llama Stack is to build an ecosystem where users can easily swap out different implementations for the same API. Examples for these include:
|
||||
- LLM inference providers (e.g., Fireworks, Together, AWS Bedrock, Groq, Cerebras, SambaNova, vLLM, etc.),
|
||||
- Vector databases (e.g., ChromaDB, Weaviate, Qdrant, FAISS, PGVector, etc.),
|
||||
- Vector databases (e.g., ChromaDB, Weaviate, Qdrant, Milvus, FAISS, PGVector, etc.),
|
||||
- Safety providers (e.g., Meta's Llama Guard, AWS Bedrock Guardrails, etc.)
|
||||
|
||||
Providers come in two flavors:
|
||||
|
|
@ -36,7 +36,7 @@ Evaluates the outputs of the system.
|
|||
Collects telemetry data from the system.
|
||||
|
||||
## Tool Runtime
|
||||
Is associated with the ToolGroup resources.
|
||||
Is associated with the ToolGroup resources.
|
||||
|
||||
## Vector IO
|
||||
|
||||
|
|
@ -55,5 +55,6 @@ vector_io/sqlite-vec
|
|||
vector_io/chromadb
|
||||
vector_io/pgvector
|
||||
vector_io/qdrant
|
||||
vector_io/milvus
|
||||
vector_io/weaviate
|
||||
```
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
orphan: true
|
||||
---
|
||||
# Chroma
|
||||
# Chroma
|
||||
|
||||
[Chroma](https://www.trychroma.com/) is an inline and remote vector
|
||||
database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database.
|
||||
[Chroma](https://www.trychroma.com/) is an inline and remote vector
|
||||
database provider for Llama Stack. It allows you to store and query vectors directly within a Chroma database.
|
||||
That means you're not limited to storing vectors in memory or in a separate service.
|
||||
|
||||
## Features
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ orphan: true
|
|||
---
|
||||
# Faiss
|
||||
|
||||
[Faiss](https://github.com/facebookresearch/faiss) is an inline vector database provider for Llama Stack. It
|
||||
[Faiss](https://github.com/facebookresearch/faiss) is an inline vector database provider for Llama Stack. It
|
||||
allows you to store and query vectors directly in memory.
|
||||
That means you'll get fast and efficient vector retrieval.
|
||||
|
||||
|
|
@ -29,5 +29,5 @@ You can install Faiss using pip:
|
|||
pip install faiss-cpu
|
||||
```
|
||||
## Documentation
|
||||
See [Faiss' documentation](https://faiss.ai/) or the [Faiss Wiki](https://github.com/facebookresearch/faiss/wiki) for
|
||||
See [Faiss' documentation](https://faiss.ai/) or the [Faiss Wiki](https://github.com/facebookresearch/faiss/wiki) for
|
||||
more details about Faiss in general.
|
||||
|
|
|
|||
31
docs/source/providers/vector_io/mivus.md
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
---
|
||||
orphan: true
|
||||
---
|
||||
# Milvus
|
||||
|
||||
[Milvus](https://milvus.io/) is an inline and remote vector database provider for Llama Stack. It
|
||||
allows you to store and query vectors directly within a Milvus database.
|
||||
That means you're not limited to storing vectors in memory or in a separate service.
|
||||
|
||||
## Features
|
||||
|
||||
- Easy to use
|
||||
- Fully integrated with Llama Stack
|
||||
|
||||
## Usage
|
||||
|
||||
To use Milvus in your Llama Stack project, follow these steps:
|
||||
|
||||
1. Install the necessary dependencies.
|
||||
2. Configure your Llama Stack project to use Milvus.
|
||||
3. Start storing and querying vectors.
|
||||
|
||||
## Installation
|
||||
|
||||
You can install Milvus using pymilvus:
|
||||
|
||||
```bash
|
||||
pip install pymilvus
|
||||
```
|
||||
## Documentation
|
||||
See the [Milvus documentation](https://milvus.io/docs/install-overview.md) for more details about Milvus in general.
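As a hedged sketch of step 2 above, registering a Milvus-backed vector database through the client might look like this (the provider id, database id, and embedding model are assumptions; consult the provider configuration for the authoritative fields):

```python
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Register a vector database backed by the Milvus provider
client.vector_dbs.register(
    vector_db_id="my_milvus_documents",  # illustrative id
    provider_id="milvus",                # assumed provider id for this distribution
    embedding_model="all-MiniLM-L6-v2",  # assumed embedding model
)
```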
|
||||
|
|
@ -3,7 +3,7 @@ orphan: true
|
|||
---
|
||||
# Postgres PGVector
|
||||
|
||||
[PGVector](https://github.com/pgvector/pgvector) is a remote vector database provider for Llama Stack. It
|
||||
[PGVector](https://github.com/pgvector/pgvector) is a remote vector database provider for Llama Stack. It
|
||||
allows you to store and query vectors directly in memory.
|
||||
That means you'll get fast and efficient vector retrieval.
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ orphan: true
|
|||
---
|
||||
# Qdrant
|
||||
|
||||
[Qdrant](https://qdrant.tech/documentation/) is a remote vector database provider for Llama Stack. It
|
||||
[Qdrant](https://qdrant.tech/documentation/) is a remote vector database provider for Llama Stack. It
|
||||
allows you to store and query vectors directly in memory.
|
||||
That means you'll get fast and efficient vector retrieval.
|
||||
|
||||
|
|
|
|||
|
|
@ -3,8 +3,8 @@ orphan: true
|
|||
---
|
||||
# SQLite-Vec
|
||||
|
||||
[SQLite-Vec](https://github.com/asg017/sqlite-vec) is an inline vector database provider for Llama Stack. It
|
||||
allows you to store and query vectors directly within an SQLite database.
|
||||
[SQLite-Vec](https://github.com/asg017/sqlite-vec) is an inline vector database provider for Llama Stack. It
|
||||
allows you to store and query vectors directly within an SQLite database.
|
||||
That means you're not limited to storing vectors in memory or in a separate service.
|
||||
|
||||
## Features
|
||||
|
|
|
|||
|
|
@ -1,10 +1,10 @@
|
|||
---
|
||||
orphan: true
|
||||
---
|
||||
# Weaviate
|
||||
# Weaviate
|
||||
|
||||
[Weaviate](https://weaviate.io/) is a vector database provider for Llama Stack.
|
||||
It allows you to store and query vectors directly within a Weaviate database.
|
||||
[Weaviate](https://weaviate.io/) is a vector database provider for Llama Stack.
|
||||
It allows you to store and query vectors directly within a Weaviate database.
|
||||
That means you're not limited to storing vectors in memory or in a separate service.
|
||||
|
||||
## Features
|
||||
|
|
@ -27,7 +27,7 @@ To use Weaviate in your Llama Stack project, follow these steps:
|
|||
|
||||
## Installation
|
||||
|
||||
To install Weaviate see the [Weaviate quickstart documentation](https://weaviate.io/developers/weaviate/quickstart).
|
||||
To install Weaviate see the [Weaviate quickstart documentation](https://weaviate.io/developers/weaviate/quickstart).
|
||||
|
||||
## Documentation
|
||||
See [Weaviate's documentation](https://weaviate.io/developers/weaviate) for more details about Weaviate in general.
|
||||
|
|
|
|||
|
|
@ -24,19 +24,9 @@ The Evaluation APIs are associated with a set of Resources as shown in the follo
|
|||
- Associated with `Benchmark` resource.
|
||||
|
||||
|
||||
Use the following decision tree to decide how to use LlamaStack Evaluation flow.
|
||||

|
||||
|
||||
|
||||
```{admonition} Note on Benchmark vs. Application Evaluation
|
||||
:class: tip
|
||||
- **Benchmark Evaluation** is a well-defined eval-task consisting of `dataset` and `scoring_function`. The generation (inference or agent) will be done as part of evaluation.
|
||||
- **Application Evaluation** assumes users already have app inputs & generated outputs. Evaluation will purely focus on scoring the generated outputs via scoring functions (e.g. LLM-as-judge).
|
||||
```
|
||||
|
||||
## Evaluation Examples Walkthrough
|
||||
|
||||
[](https://colab.research.google.com/drive/10CHyykee9j2OigaIcRv47BKG9mrNm0tJ?usp=sharing)
|
||||
[](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb)
|
||||
|
||||
It is best to open this notebook in Colab to follow along with the examples.
|
||||
|
||||
|
|
@ -63,20 +53,29 @@ eval_rows = ds.to_pandas().to_dict(orient="records")
|
|||
- Run evaluate on the dataset
|
||||
|
||||
```python
|
||||
from rich.pretty import pprint
|
||||
from tqdm import tqdm
|
||||
|
||||
SYSTEM_PROMPT_TEMPLATE = """
|
||||
You are an expert in Agriculture whose job is to answer questions from the user using images.
|
||||
You are an expert in {subject} whose job is to answer questions from the user using images.
|
||||
|
||||
First, reason about the correct answer.
|
||||
|
||||
Then write the answer in the following format where X is exactly one of A,B,C,D:
|
||||
|
||||
Answer: X
|
||||
|
||||
Make sure X is one of A,B,C,D.
|
||||
|
||||
If you are uncertain of the correct answer, guess the most likely one.
|
||||
"""
|
||||
|
||||
system_message = {
|
||||
"role": "system",
|
||||
"content": SYSTEM_PROMPT_TEMPLATE,
|
||||
"content": SYSTEM_PROMPT_TEMPLATE.format(subject=subset),
|
||||
}
|
||||
|
||||
# register the evaluation benchmark task with the dataset and scoring function
|
||||
client.benchmarks.register(
|
||||
benchmark_id="meta-reference::mmmu",
|
||||
dataset_id=f"mmmu-{subset}-{split}",
|
||||
|
|
@ -87,14 +86,15 @@ response = client.eval.evaluate_rows(
|
|||
benchmark_id="meta-reference::mmmu",
|
||||
input_rows=eval_rows,
|
||||
scoring_functions=["basic::regex_parser_multiple_choice_answer"],
|
||||
task_config={
|
||||
"type": "benchmark",
|
||||
benchmark_config={
|
||||
"eval_candidate": {
|
||||
"type": "model",
|
||||
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
|
||||
"sampling_params": {
|
||||
"strategy": {
|
||||
"type": "greedy",
|
||||
"type": "top_p",
|
||||
"temperature": 1.0,
|
||||
"top_p": 0.95,
|
||||
},
|
||||
"max_tokens": 4096,
|
||||
"repeat_penalty": 1.0,
|
||||
|
|
@ -103,6 +103,7 @@ response = client.eval.evaluate_rows(
|
|||
},
|
||||
},
|
||||
)
|
||||
pprint(response)
|
||||
```
|
||||
|
||||
#### 1.2. Running SimpleQA
|
||||
|
|
@ -115,10 +116,9 @@ simpleqa_dataset_id = "huggingface::simpleqa"
|
|||
_ = client.datasets.register(
|
||||
dataset_id=simpleqa_dataset_id,
|
||||
provider_id="huggingface",
|
||||
url={"uri": "https://huggingface.co/datasets/llamastack/evals"},
|
||||
url={"uri": "https://huggingface.co/datasets/llamastack/simpleqa"},
|
||||
metadata={
|
||||
"path": "llamastack/evals",
|
||||
"name": "evals__simpleqa",
|
||||
"path": "llamastack/simpleqa",
|
||||
"split": "train",
|
||||
},
|
||||
dataset_schema={
|
||||
|
|
@ -145,8 +145,7 @@ response = client.eval.evaluate_rows(
|
|||
benchmark_id="meta-reference::simpleqa",
|
||||
input_rows=eval_rows.rows,
|
||||
scoring_functions=["llm-as-judge::405b-simpleqa"],
|
||||
task_config={
|
||||
"type": "benchmark",
|
||||
benchmark_config={
|
||||
"eval_candidate": {
|
||||
"type": "model",
|
||||
"model": "meta-llama/Llama-3.2-90B-Vision-Instruct",
|
||||
|
|
@ -160,6 +159,7 @@ response = client.eval.evaluate_rows(
|
|||
},
|
||||
},
|
||||
)
|
||||
pprint(response)
|
||||
```
|
||||
|
||||
|
||||
|
|
@ -170,19 +170,17 @@ response = client.eval.evaluate_rows(
|
|||
|
||||
```python
|
||||
agent_config = {
|
||||
"model": "meta-llama/Llama-3.1-405B-Instruct",
|
||||
"instructions": "You are a helpful assistant",
|
||||
"model": "meta-llama/Llama-3.3-70B-Instruct",
|
||||
"instructions": "You are a helpful assistant that have access to tool to search the web. ",
|
||||
"sampling_params": {
|
||||
"strategy": {
|
||||
"type": "greedy",
|
||||
},
|
||||
},
|
||||
"tools": [
|
||||
{
|
||||
"type": "brave_search",
|
||||
"engine": "tavily",
|
||||
"api_key": userdata.get("TAVILY_SEARCH_API_KEY"),
|
||||
"type": "top_p",
|
||||
"temperature": 0.5,
|
||||
"top_p": 0.9,
|
||||
}
|
||||
},
|
||||
"toolgroups": [
|
||||
"builtin::websearch",
|
||||
],
|
||||
"tool_choice": "auto",
|
||||
"tool_prompt_format": "json",
|
||||
|
|
@ -195,25 +193,22 @@ response = client.eval.evaluate_rows(
|
|||
benchmark_id="meta-reference::simpleqa",
|
||||
input_rows=eval_rows.rows,
|
||||
scoring_functions=["llm-as-judge::405b-simpleqa"],
|
||||
task_config={
|
||||
"type": "benchmark",
|
||||
benchmark_config={
|
||||
"eval_candidate": {
|
||||
"type": "agent",
|
||||
"config": agent_config,
|
||||
},
|
||||
},
|
||||
)
|
||||
pprint(response)
|
||||
```
|
||||
|
||||
### 3. Agentic Application Dataset Scoring
|
||||
- Llama Stack offers a library of scoring functions and the `/scoring` API, allowing you to run evaluations on your pre-annotated AI application datasets.
|
||||
[](https://colab.research.google.com/github/meta-llama/llama-stack/blob/main/docs/getting_started.ipynb)
|
||||
|
||||
- In this example, we will work with an example RAG dataset and couple of scoring functions for evaluation.
|
||||
- `llm-as-judge::base`: LLM-As-Judge with custom judge prompt & model.
|
||||
- `braintrust::factuality`: Factuality scorer from [braintrust](https://github.com/braintrustdata/autoevals).
|
||||
- `basic::subset_of`: Basic checking if generated answer is a subset of expected answer.
|
||||
Llama Stack offers a library of scoring functions and the `/scoring` API, allowing you to run evaluations on your pre-annotated AI application datasets.
|
||||
|
||||
- Please checkout our [Llama Stack Playground](https://llama-stack.readthedocs.io/en/latest/playground/index.html) for an interactive interface to upload datasets and run scorings.
|
||||
In this example, we will work with an example RAG dataset you have built previously, label it with an annotation, and use LLM-As-Judge with a custom judge prompt for scoring. Please check out our [Llama Stack Playground](https://llama-stack.readthedocs.io/en/latest/playground/index.html) for an interactive interface to upload datasets and run scorings.
|
||||
|
||||
```python
|
||||
judge_model_id = "meta-llama/Llama-3.1-405B-Instruct-FP8"
|
||||
|
|
@ -317,28 +312,9 @@ The `BenchmarkConfig` are user specified config to define:
|
|||
2. Optionally scoring function params to allow customization of scoring function behaviour. This is useful to parameterize generic scoring functions such as LLMAsJudge with custom `judge_model` / `judge_prompt`.
|
||||
|
||||
|
||||
**Example Benchmark BenchmarkConfig**
|
||||
**Example BenchmarkConfig**
|
||||
```json
|
||||
{
|
||||
"type": "benchmark",
|
||||
"eval_candidate": {
|
||||
"type": "model",
|
||||
"model": "Llama3.2-3B-Instruct",
|
||||
"sampling_params": {
|
||||
"strategy": {
|
||||
"type": "greedy",
|
||||
},
|
||||
"max_tokens": 0,
|
||||
"repetition_penalty": 1.0
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Application BenchmarkConfig**
|
||||
```json
|
||||
{
|
||||
"type": "app",
|
||||
"eval_candidate": {
|
||||
"type": "model",
|
||||
"model": "Llama3.1-405B-Instruct",
|
||||
|
|
|
|||
|
|
@ -129,3 +129,35 @@ llama download --source huggingface --model-id Prompt-Guard-86M --ignore-pattern
|
|||
**Important:** Set your environment variable `HF_TOKEN` or pass in `--hf-token` to the command to validate your access. You can find your token at [https://huggingface.co/settings/tokens](https://huggingface.co/settings/tokens).
|
||||
|
||||
> **Tip:** Default for `llama download` is to run with `--ignore-patterns *.safetensors` since we use the `.pth` files in the `original` folder. For Llama Guard and Prompt Guard, however, we need safetensors. Hence, please run with `--ignore-patterns original` so that safetensors are downloaded and `.pth` files are ignored.
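For example, an illustrative download of a safety model with safetensors kept and `.pth` files ignored (requires `HF_TOKEN` or `--hf-token`, as noted above):

```bash
# Illustrative invocation; model id taken from the tables in this guide
llama download --source huggingface --model-id Llama-Guard-3-1B --ignore-patterns original
```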
|
||||
|
||||
## List the downloaded models
|
||||
|
||||
To list the downloaded models, run the following command:
|
||||
```
|
||||
llama model list --downloaded
|
||||
```
|
||||
|
||||
You should see a table like this:
|
||||
```
|
||||
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┓
|
||||
┃ Model ┃ Size ┃ Modified Time ┃
|
||||
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━┩
|
||||
│ Llama3.2-1B-Instruct:int4-qlora-eo8 │ 1.53 GB │ 2025-02-26 11:22:28 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-1B │ 2.31 GB │ 2025-02-18 21:48:52 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Prompt-Guard-86M │ 0.02 GB │ 2025-02-26 11:29:28 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-3B-Instruct:int4-spinquant-eo8 │ 3.69 GB │ 2025-02-26 11:37:41 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-3B │ 5.99 GB │ 2025-02-18 21:51:26 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.1-8B │ 14.97 GB │ 2025-02-16 10:36:37 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama3.2-1B-Instruct:int4-spinquant-eo8 │ 1.51 GB │ 2025-02-26 11:35:02 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama-Guard-3-1B │ 2.80 GB │ 2025-02-26 11:20:46 │
|
||||
├─────────────────────────────────────────┼──────────┼─────────────────────┤
|
||||
│ Llama-Guard-3-1B:int4 │ 0.43 GB │ 2025-02-26 11:33:33 │
|
||||
└─────────────────────────────────────────┴──────────┴─────────────────────┘
|
||||
```
|
||||
|
|
|
@@ -154,6 +154,38 @@ llama download --source huggingface --model-id Prompt-Guard-86M --ignore-pattern

> **Tip:** By default, `llama download` runs with `--ignore-patterns *.safetensors`, since we use the `.pth` files in the `original` folder. For Llama Guard and Prompt Guard, however, we need the safetensors files, so run those downloads with `--ignore-patterns original` so that safetensors are downloaded and the `.pth` files are ignored.

## List the downloaded models

To list the downloaded models, run the following command:
```
llama model list --downloaded
```

You should see a table like this:
```
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━┓
┃ Model                                   ┃ Size     ┃ Modified Time       ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━┩
│ Llama3.2-1B-Instruct:int4-qlora-eo8     │ 1.53 GB  │ 2025-02-26 11:22:28 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama3.2-1B                             │ 2.31 GB  │ 2025-02-18 21:48:52 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Prompt-Guard-86M                        │ 0.02 GB  │ 2025-02-26 11:29:28 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama3.2-3B-Instruct:int4-spinquant-eo8 │ 3.69 GB  │ 2025-02-26 11:37:41 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama3.2-3B                             │ 5.99 GB  │ 2025-02-18 21:51:26 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama3.1-8B                             │ 14.97 GB │ 2025-02-16 10:36:37 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama3.2-1B-Instruct:int4-spinquant-eo8 │ 1.51 GB  │ 2025-02-26 11:35:02 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama-Guard-3-1B                        │ 2.80 GB  │ 2025-02-26 11:20:46 │
├─────────────────────────────────────────┼──────────┼─────────────────────┤
│ Llama-Guard-3-1B:int4                   │ 0.43 GB  │ 2025-02-26 11:33:33 │
└─────────────────────────────────────────┴──────────┴─────────────────────┘
```

## Understand the models
The `llama model` command helps you explore the model’s interface.
@@ -294,8 +294,9 @@
" # Initialize custom tool (ensure `WebSearchTool` is defined earlier in the notebook)\n",
" webSearchTool = WebSearchTool(api_key=BRAVE_SEARCH_API_KEY)\n",
"\n",
" # Define the agent configuration, including the model and tool setup\n",
" agent_config = AgentConfig(\n",
" # Create an agent instance with the client and configuration\n",
" agent = Agent(\n",
" client, \n",
" model=MODEL_NAME,\n",
" instructions=\"\"\"You are a helpful assistant that responds to user queries with relevant information and cites sources when available.\"\"\",\n",
" sampling_params={\n",

@@ -303,17 +304,12 @@
" \"type\": \"greedy\",\n",
" },\n",
" },\n",
" tools=[webSearchTool.get_tool_definition()],\n",
" tool_choice=\"auto\",\n",
" tool_prompt_format=\"python_list\",\n",
" tools=[webSearchTool],\n",
" input_shields=input_shields,\n",
" output_shields=output_shields,\n",
" enable_session_persistence=False,\n",
" )\n",
"\n",
" # Create an agent instance with the client and configuration\n",
" agent = Agent(client, agent_config, [webSearchTool])\n",
"\n",
" # Create a session for interaction and print the session ID\n",
" session_id = agent.create_session(\"test-session\")\n",
" print(f\"Created session_id={session_id} for Agent({agent.agent_id})\")\n",
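For readers following the notebook changes above: the hunks replace the old two-step of building an `AgentConfig` and then wrapping it in `Agent` with a direct keyword-argument constructor. Below is a condensed sketch of the new pattern; `WebSearchTool`, `MODEL_NAME`, and `BRAVE_SEARCH_API_KEY` are the notebook's own definitions, the host and port are placeholders, and minor keyword differences between llama-stack-client releases are possible.

```python
# Condensed sketch of the post-refactor pattern shown in the hunks above: the
# Agent is constructed directly with keyword arguments instead of via AgentConfig.
# WebSearchTool, MODEL_NAME, and BRAVE_SEARCH_API_KEY come from the notebook;
# substitute your own values when running this outside of it.
from llama_stack_client import LlamaStackClient
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger

client = LlamaStackClient(base_url="http://localhost:5001")  # adjust host/port

web_search_tool = WebSearchTool(api_key=BRAVE_SEARCH_API_KEY)  # notebook-defined custom tool

agent = Agent(
    client,
    model=MODEL_NAME,
    instructions="You are a helpful assistant that cites sources when available.",
    sampling_params={"strategy": {"type": "greedy"}},
    tools=[web_search_tool],
    enable_session_persistence=False,
)

session_id = agent.create_session("test-session")
print(f"Created session_id={session_id} for Agent({agent.agent_id})")

# Run one turn and stream the events through the client's event logger.
response = agent.create_turn(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    session_id=session_id,
)
for log in EventLogger().log(response):
    log.print()
```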
@@ -110,12 +110,12 @@
"from llama_stack_client import LlamaStackClient\n",
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import AgentConfig\n",
"\n",
"\n",
"async def agent_example():\n",
" client = LlamaStackClient(base_url=f\"http://{HOST}:{PORT}\")\n",
" agent_config = AgentConfig(\n",
" agent = Agent(\n",
" client, \n",
" model=MODEL_NAME,\n",
" instructions=\"You are a helpful assistant! If you call builtin tools like brave search, follow the syntax brave_search.call(…)\",\n",
" sampling_params={\n",

@@ -130,14 +130,7 @@
" \"api_key\": BRAVE_SEARCH_API_KEY,\n",
" }\n",
" ],\n",
" tool_choice=\"auto\",\n",
" tool_prompt_format=\"function_tag\",\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=False,\n",
" )\n",
"\n",
" agent = Agent(client, agent_config)\n",
" session_id = agent.create_session(\"test-session\")\n",
" print(f\"Created session_id={session_id} for Agent({agent.agent_id})\")\n",
"\n",
@@ -73,7 +73,7 @@ If you're looking for more specific topics, we have a [Zero to Hero Guide](#next
Open a new terminal and install `llama-stack`:
```bash
conda activate ollama
pip install llama-stack==0.1.0
pip install -U llama-stack
```

---
@@ -103,7 +103,6 @@
"from llama_stack_client.lib.agents.agent import Agent\n",
"from llama_stack_client.lib.agents.event_logger import EventLogger\n",
"from llama_stack_client.types.agent_create_params import (\n",
" AgentConfig,\n",
" AgentConfigToolSearchToolDefinition,\n",
")\n",
"\n",

@@ -117,7 +116,8 @@
") -> Agent:\n",
" \"\"\"Create an agent with specified tools.\"\"\"\n",
" print(\"Using the following model: \", model)\n",
" agent_config = AgentConfig(\n",
" return Agent(\n",
" client, \n",
" model=model,\n",
" instructions=instructions,\n",
" sampling_params={\n",

@@ -126,12 +126,7 @@
" },\n",
" },\n",
" tools=tools,\n",
" tool_choice=\"auto\",\n",
" tool_prompt_format=\"json\",\n",
" enable_session_persistence=True,\n",
" )\n",
"\n",
" return Agent(client, agent_config)\n"
" )\n"
]
},
{
@@ -360,9 +355,9 @@
" # Create the agent with the tool\n",
" weather_tool = WeatherTool()\n",
"\n",
" agent_config = AgentConfig(\n",
" agent = Agent(\n",
" client=client, \n",
" model=LLAMA31_8B_INSTRUCT,\n",
" # model=model_name,\n",
" instructions=\"\"\"\n",
" You are a weather assistant that can provide weather information.\n",
" Always specify the location clearly in your responses.\n",

@@ -373,16 +368,9 @@
" \"type\": \"greedy\",\n",
" },\n",
" },\n",
" tools=[weather_tool.get_tool_definition()],\n",
" tool_choice=\"auto\",\n",
" tool_prompt_format=\"json\",\n",
" input_shields=[],\n",
" output_shields=[],\n",
" enable_session_persistence=True,\n",
" tools=[weather_tool],\n",
" )\n",
"\n",
" agent = Agent(client=client, agent_config=agent_config, custom_tools=[weather_tool])\n",
"\n",
" return agent\n",
"\n",
"\n",