From 6519b917e545fde167d332bb768a8c3a00e86272 Mon Sep 17 00:00:00 2001 From: Xi Yan Date: Wed, 12 Mar 2025 23:23:32 -0700 Subject: [PATCH] new mocks --- .../recorded_responses/chat_completion.json | 6969 +++++++++++++---- .../recorded_responses/invoke_tool.json | 55 +- 2 files changed, 5276 insertions(+), 1748 deletions(-) diff --git a/tests/integration/fixtures/recorded_responses/chat_completion.json b/tests/integration/fixtures/recorded_responses/chat_completion.json index 8694cc271..80744576e 100644 --- a/tests/integration/fixtures/recorded_responses/chat_completion.json +++ b/tests/integration/fixtures/recorded_responses/chat_completion.json @@ -39542,7 +39542,7 @@ "data": { "event": { "delta": { - "text": " provided function definitions", + "text": " provided function definitions are", "type": "text" }, "event_type": { @@ -39562,7 +39562,7 @@ "data": { "event": { "delta": { - "text": " are not suitable", + "text": " not suitable for this task", "type": "text" }, "event_type": { @@ -39582,7 +39582,7 @@ "data": { "event": { "delta": { - "text": " for this task. Please re", + "text": ". Please rework them", "type": "text" }, "event_type": { @@ -39602,27 +39602,7 @@ "data": { "event": { "delta": { - "text": "work them to", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " align with the task requirements.", + "text": " to align with the task requirements.", "type": "text" }, "event_type": { @@ -39659,54 +39639,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "D2n_IS_8", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:03:32.021393+00:00", - "__module__": "datetime" - }, - "trace_id": "amAiZv5PQKSsA74j", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 90 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "D2n_IS_8", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:03:32.021420+00:00", - "__module__": "datetime" - }, - "trace_id": "amAiZv5PQKSsA74j", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 32 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "D2n_IS_8", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:03:32.021427+00:00", - "__module__": "datetime" - }, - "trace_id": "amAiZv5PQKSsA74j", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 122 } ] @@ -40551,7 +40495,7 @@ "data": { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice", + "text": "get_boiling_point(liquid_name='polyjuice', celcius", "type": "text" }, "event_type": { @@ -40571,7 +40515,7 @@ "data": { "event": { "delta": { - "text": "', celcius=True)]", + "text": "=True)]", "type": "text" }, "event_type": { @@ -40601,7 +40545,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "3955f756-9aa0-433f-be8f-af8941c220de", + "call_id": "152ff1b5-7bcf-4d46-8c8f-e5c13f646925", "tool_name": 
"get_boiling_point" }, "type": "tool_call" @@ -40644,54 +40588,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "QZ6PSGpT", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:03:29.629456+00:00", - "__module__": "datetime" - }, - "trace_id": "M72bosg8TBe3uhx3", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 43 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "QZ6PSGpT", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:03:29.629488+00:00", - "__module__": "datetime" - }, - "trace_id": "M72bosg8TBe3uhx3", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 28 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "QZ6PSGpT", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:03:29.629494+00:00", - "__module__": "datetime" - }, - "trace_id": "M72bosg8TBe3uhx3", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 71 } ] @@ -40748,7 +40656,7 @@ "data": { "event": { "delta": { - "text": " function call returned an", + "text": " function get_boiling_point was called with", "type": "text" }, "event_type": { @@ -40768,7 +40676,7 @@ "data": { "event": { "delta": { - "text": " error since", + "text": " the parameters liquid_name = \"polyjuice\" and", "type": "text" }, "event_type": { @@ -40788,7 +40696,7 @@ "data": { "event": { "delta": { - "text": " \"", + "text": " celcius = True. However, the function returned -", "type": "text" }, "event_type": { @@ -40808,7 +40716,7 @@ "data": { "event": { "delta": { - "text": "polyjuice\" is", + "text": "100, which is not a valid", "type": "text" }, "event_type": { @@ -40828,7 +40736,7 @@ "data": { "event": { "delta": { - "text": " not a real liquid. Polyju", + "text": " boiling point. This suggests that the function does not", "type": "text" }, "event_type": { @@ -40848,7 +40756,7 @@ "data": { "event": { "delta": { - "text": "ice is a fictional substance from the", + "text": " have the boiling point of \"poly", "type": "text" }, "event_type": { @@ -40868,107 +40776,7 @@ "data": { "event": { "delta": { - "text": " Harry Potter series. 
The boiling point", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " of a substance is a physical", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " property that can be measured and", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " quantified", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": ", but it only applies", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " to real substances that exist in the physical world.", + "text": "juice\" in its database.", "type": "text" }, "event_type": { @@ -41005,54 +40813,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "y9SHtJTQ", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:01.411612+00:00", - "__module__": "datetime" - }, - "trace_id": "_I2Cu85IRtOSBSX9", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 84 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "y9SHtJTQ", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:01.411644+00:00", - "__module__": "datetime" - }, - "trace_id": "_I2Cu85IRtOSBSX9", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 73 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "y9SHtJTQ", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:01.411650+00:00", - "__module__": "datetime" - }, - "trace_id": "_I2Cu85IRtOSBSX9", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 157 } ] @@ -41270,7 +41042,7 @@ 
"data": { "event": { "delta": { - "text": " function get_bo", + "text": " function call should have been", "type": "text" }, "event_type": { @@ -41290,7 +41062,7 @@ "data": { "event": { "delta": { - "text": "iling_point_with_metadata does not exist,", + "text": " \n[get_boiling_point_with", "type": "text" }, "event_type": { @@ -41310,7 +41082,7 @@ "data": { "event": { "delta": { - "text": " I will", + "text": "_metadata(liquid_name='polyjuice', celci", "type": "text" }, "event_type": { @@ -41330,7 +41102,7 @@ "data": { "event": { "delta": { - "text": " assume you", + "text": "us=True)] \nHowever since the", "type": "text" }, "event_type": { @@ -41350,7 +41122,7 @@ "data": { "event": { "delta": { - "text": " meant get_bo", + "text": " function get_boiling_point_with_metadata does not", "type": "text" }, "event_type": { @@ -41370,7 +41142,7 @@ "data": { "event": { "delta": { - "text": "iling_point_with_metadata", + "text": " exist in the ipython environment,", "type": "text" }, "event_type": { @@ -41390,7 +41162,7 @@ "data": { "event": { "delta": { - "text": ". The boiling point of polyjuice", + "text": " it will throw an", "type": "text" }, "event_type": { @@ -41410,7 +41182,367 @@ "data": { "event": { "delta": { - "text": " is -100.", + "text": " error. \n\nIn order to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " get the correct answer, you would need to define the function", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " get_boiling_point_with_metadata first. 
\n\nHere is how you", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " can define it:\n\n```python\ndef get_boiling", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "_point_with_metadata(liquid_name, celcius=True):\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " # This is a mock implementation,", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " you would need a real", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " database of boiling points\n boiling_points", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " = {\n \"water\": 100,\n \"polyjuice\":", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " 120 #", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + 
"text": " This is fictional, polyjuice is not a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " real liquid\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " }\n \n if liquid_name in boiling", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "_points:\n if celcius:\n return boiling_points[liquid_name", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "]\n else:\n return boiling_points[liquid_name] *", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " 9/5 + 32\n else:\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " return \"Boiling point not found\"\n```\n\nThen you", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " can call the function:\n\n```python\nprint(get_bo", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": 
"ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "iling_point_with_metadata(\"polyjuice\"))\n```", "type": "text" }, "event_type": { @@ -41447,55 +41579,19 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "8dM6i5mO", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:03.329281+00:00", - "__module__": "datetime" - }, - "trace_id": "zMJDP5dXRrChi7uE", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 86 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "8dM6i5mO", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:03.329312+00:00", - "__module__": "datetime" - }, - "trace_id": "zMJDP5dXRrChi7uE", - "type": "metric", - "unit": "tokens", - "value": 45 + "unit": null, + "value": 250 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "8dM6i5mO", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:03.329318+00:00", - "__module__": "datetime" - }, - "trace_id": "zMJDP5dXRrChi7uE", - "type": "metric", - "unit": "tokens", - "value": 131 + "unit": null, + "value": 336 } ] } @@ -41732,7 +41828,7 @@ "data": { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice", + "text": "get_boiling_point(liquid_name='polyjuice', cel", "type": "text" }, "event_type": { @@ -41752,7 +41848,7 @@ "data": { "event": { "delta": { - "text": "', celcius=True)]", + "text": "cius=True)]", "type": "text" }, "event_type": { @@ -41782,7 +41878,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "328cb19d-47bb-47cc-8258-a5ca2e26803e", + "call_id": "2dece34e-68c3-43a9-b685-e229569135ab", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -41825,54 +41921,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "dS0bhfN_", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:53.324788+00:00", - "__module__": "datetime" - }, - "trace_id": "UJz5Cas1SDyQYeBk", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 37 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "dS0bhfN_", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:53.324835+00:00", - "__module__": "datetime" - }, - "trace_id": "UJz5Cas1SDyQYeBk", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 28 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "dS0bhfN_", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:53.324844+00:00", - "__module__": "datetime" - }, - "trace_id": "UJz5Cas1SDyQYeBk", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 65 } ] @@ -41929,7 +41989,7 @@ "data": { "event": { "delta": { - "text": "get_boiling_point_with_metadata", + "text": "get_boiling_point_with_metadata(liquid_name='poly", "type": "text" }, "event_type": { @@ -41949,27 +42009,7 @@ "data": { "event": { "delta": { - "text": "(liquid_name='polyjuice', cel", - "type": "text" - }, - 
"event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": "cius=True)]", + "text": "juice', celcius=True)]", "type": "text" }, "event_type": { @@ -41999,7 +42039,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "5bb48d00-7d5c-49e2-bddf-e5fdc5f35485", + "call_id": "24730d85-818c-4719-8a28-903160341849", "tool_name": "get_boiling_point_with_metadata" }, "type": "tool_call" @@ -42042,54 +42082,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "mfrFN7m2", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:02.136501+00:00", - "__module__": "datetime" - }, - "trace_id": "T4eddr4-SMWPQwKA", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 37 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "mfrFN7m2", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:02.136529+00:00", - "__module__": "datetime" - }, - "trace_id": "T4eddr4-SMWPQwKA", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 30 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "mfrFN7m2", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:05:02.136535+00:00", - "__module__": "datetime" - }, - "trace_id": "T4eddr4-SMWPQwKA", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 67 } ] @@ -42166,7 +42170,7 @@ "data": { "event": { "delta": { - "text": " phone, the friendly", + "text": " phone, the friendly voice on the other end said", "type": "text" }, "event_type": { @@ -42186,7 +42190,7 @@ "data": { "event": { "delta": { - "text": " voice on the other end said \"hello\"", + "text": " \"hello\" and asked how I was doing", "type": "text" }, "event_type": { @@ -42206,7 +42210,7 @@ "data": { "event": { "delta": { - "text": " and asked how I was doing.", + "text": ".", "type": "text" }, "event_type": { @@ -42243,54 +42247,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "tJEuRhla", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:01.044284+00:00", - "__module__": "datetime" - }, - "trace_id": "bnDS7Z41TRO0UyfH", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 30 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "tJEuRhla", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:01.044312+00:00", - "__module__": "datetime" - }, - "trace_id": "bnDS7Z41TRO0UyfH", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 34 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "tJEuRhla", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:01.044318+00:00", - 
"__module__": "datetime" - }, - "trace_id": "bnDS7Z41TRO0UyfH", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 64 } ] @@ -42548,13 +42516,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "started" - }, - "tool_call": "", - "type": "tool_call" + "text": "The", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42573,13 +42536,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": "import pandas as pd\n# Load data\ndf =", - "type": "tool_call" + "text": " error message indicates that the file 'bwrap' was not", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42598,13 +42556,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": " pd.read_csv(\"/var/folders/rb/qv8vwgyj", - "type": "tool_call" + "text": " found. This is likely because the file path provided", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42623,13 +42576,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": "6yjd3t4pwsy9t0rm0000", - "type": "tool_call" + "text": " is incorrect or the file does not exist in", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42648,13 +42596,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": "gn/T/tmp2x_sml66/ZEjbinQHin", - "type": "tool_call" + "text": " the current working directory.\n\nTo resolve this", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42673,13 +42616,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": "flation.csv\")\n# Rows\nprint(\"Number of rows and columns in the", - "type": "tool_call" + "text": " issue, you can try the following:\n\n1. Check", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42698,13 +42636,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": " data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\",", - "type": "tool_call" + "text": " the file path: Ensure that the file path provided is", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42723,13 +42656,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": " len(df.columns))\n# Column names\nprint(\"Columns of the data", - "type": "tool_call" + "text": " correct and the file exists in the specified location.\n2. 
Use the correct", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42748,13 +42676,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": " are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df", - "type": "tool_call" + "text": " file path: If the file is located in a different directory,", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42773,13 +42696,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": ".dtypes)\n# Sample of data\nprint(\"Data sample from file:\")\n", - "type": "tool_call" + "text": " provide the correct file path.\n3. Check the file name: Ensure that", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42798,13 +42716,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "in_progress" - }, - "tool_call": "print(df.head())", - "type": "tool_call" + "text": " the file name is correct and matches the one provided", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42823,23 +42736,8 @@ "data": { "event": { "delta": { - "parse_status": { - "__enum__": "ToolCallParseStatus", - "__module__": "llama_stack.apis.common.content_types", - "value": "succeeded" - }, - "tool_call": { - "arguments": { - "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmp2x_sml66/ZEjbinQHinflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)\n# Sample of data\nprint(\"Data sample from file:\")\nprint(df.head())" - }, - "call_id": "1df8b196-9eff-4b06-97e7-ab175c741e8f", - "tool_name": { - "__enum__": "BuiltinTool", - "__module__": "llama_stack.models.llama.datatypes", - "value": "code_interpreter" - } - }, - "type": "tool_call" + "text": " in the code.\n4. 
Use the absolute", + "type": "text" }, "event_type": { "__enum__": "ChatCompletionResponseEventType", @@ -42847,11 +42745,127 @@ "value": "progress" }, "logprobs": null, - "stop_reason": { - "__enum__": "StopReason", - "__module__": "llama_stack.models.llama.datatypes", - "value": "end_of_turn" - } + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " file path: Instead of", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " using a relative file path, try using the absolute file path to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " the file.\n\nIf you are", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " still encountering issues, please provide more details about", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " the file and its location, and I'll be happy to assist you further", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ".", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null }, "metrics": null } @@ -42879,55 +42893,19 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "fLqIbpek", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:40.262304+00:00", - "__module__": "datetime" - }, - "trace_id": "StUjhrTMQKKQSRvS", - "type": "metric", - "unit": "tokens", - "value": 235 + "unit": null, + "value": 238 }, { - "attributes": { - "model_id": 
"meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "fLqIbpek", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:40.262340+00:00", - "__module__": "datetime" - }, - "trace_id": "StUjhrTMQKKQSRvS", - "type": "metric", - "unit": "tokens", - "value": 10 + "unit": null, + "value": 183 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "fLqIbpek", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:40.262347+00:00", - "__module__": "datetime" - }, - "trace_id": "StUjhrTMQKKQSRvS", - "type": "metric", - "unit": "tokens", - "value": 245 + "unit": null, + "value": 421 } ] } @@ -42993,7 +42971,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "import pandas as pd\n# Load data\ndf = pd", + "tool_call": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var", "type": "tool_call" }, "event_type": { @@ -43018,7 +42996,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": ".read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4", + "tool_call": "/folders/rb/qvqvwgyj6yjd3", "type": "tool_call" }, "event_type": { @@ -43043,7 +43021,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "pwsy9t0rm0000gn/T/tmp2x_sml66/ZEj", + "tool_call": "t4pwsy9t0rm0000gn/T/tmpcf_0q3u", "type": "tool_call" }, "event_type": { @@ -43068,7 +43046,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "binQHinflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data", + "tool_call": "1/q2GJtbK2inflation.csv\")\n# Rows\nprint(\"Number of rows", "type": "tool_call" }, "event_type": { @@ -43093,7 +43071,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": ":\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n#", + "tool_call": " and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\",", "type": "tool_call" }, "event_type": { @@ -43118,7 +43096,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint", + "tool_call": " len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n#", "type": "tool_call" }, "event_type": { @@ -43143,7 +43121,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "(\"Datatype of the columns are:\", df.dtypes)\n# Sample", + "tool_call": " Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)\n#", "type": "tool_call" }, "event_type": { @@ -43168,7 +43146,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " of data\nprint(\"Data sample from file:\")\nprint(df.head())", + "tool_call": " Sample of data\nprint(\"Data sample from file:\")\nprint(df.head())", "type": "tool_call" }, "event_type": { @@ -43195,9 +43173,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmp2x_sml66/ZEjbinQHinflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column 
names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)\n# Sample of data\nprint(\"Data sample from file:\")\nprint(df.head())" + "code": "import pandas as pd\n# Load data\ndf = pd.read_csv(\"/var/folders/rb/qvqvwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpcf_0q3u1/q2GJtbK2inflation.csv\")\n# Rows\nprint(\"Number of rows and columns in the data:\", df.shape)\n# Columns\nprint(\"Columns of the data are:\", len(df.columns))\n# Column names\nprint(\"Columns of the data are:\", df.columns)\n# Column dtypes\nprint(\"Datatype of the columns are:\", df.dtypes)\n# Sample of data\nprint(\"Data sample from file:\")\nprint(df.head())" }, - "call_id": "c1708ded-f272-4008-b91f-19d61780c394", + "call_id": "e57e8c8f-b440-46c6-aa6e-3d0e743e2fd9", "tool_name": { "__enum__": "BuiltinTool", "__module__": "llama_stack.models.llama.datatypes", @@ -43244,54 +43222,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "KTMayjIE", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:37.305765+00:00", - "__module__": "datetime" - }, - "trace_id": "StUjhrTMQKKQSRvS", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 37 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "KTMayjIE", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:37.305820+00:00", - "__module__": "datetime" - }, - "trace_id": "StUjhrTMQKKQSRvS", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 10 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "KTMayjIE", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:37.305832+00:00", - "__module__": "datetime" - }, - "trace_id": "StUjhrTMQKKQSRvS", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 47 } ] @@ -44261,6 +44203,1999 @@ ], "type": "generator" }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or 
directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"The error message indicates that the file \\\"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file not being accessible.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists at that location.\\n2. Check file permissions: Ensure that the file is accessible and you have the necessary permissions to read it.\\n3. Try a different file: If the file is not available, try using a different file or creating a sample file to test the code.\\n\\nIf you are still having issues, please provide more details about the file and the environment you are running the code in, and I'll be happy to help further.\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Plot average yearly inflation as a time series\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Convert the 'Year' column to datetime\\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\\n\\n# Group by 'Year' and calculate the average inflation\\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\\n\\n# Plot the average yearly inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Inflation')\\nplt.grid(True)\\nplt.show()\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": 
{\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " error message indicates that the file \"/var/folders/rb/qv8", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "vwgyj6yjd3t4pwsy9t0", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "rm0000gn/T/tmpk42nx919/quNW0bvlin", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "flation.csv\" does not exist. 
This could be due to a number of", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " reasons such as the file being deleted, the path being incorrect, or the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " file not being accessible.\n\nTo resolve this issue,", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " you can try the following:\n\n1. Check the file path: Make sure", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " the file path is correct and the file exists at that location.\n2.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " Check file permissions: Ensure that the file is accessible and you have the necessary", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " permissions to read it.\n3. 
Try a different file: If the file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " is not available, try using a different file or creating a sample file to", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " test the code.\n\nIf you are still having issues, please provide more details", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " about the file and the environment you are running the code in, and I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "'ll be happy to help further.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 644 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 208 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 852 + } + ] + } + } + ], + "type": "generator" + }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": 
{\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"The error message indicates that the file \\\"\" does not exist. This could be due to a number of reasons such as the file being deleted, the path being incorrect, or the file not being accessible.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists at that location.\\n2. Check file permissions: Ensure that the file is accessible and you have the necessary permissions to read it.\\n3. Try a different file: If the file is not available, try using a different file or creating a sample file to test the code.\\n\\nIf you are still having issues, please provide more details about the file and the environment you are running the code in, and I'll be happy to help further.\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Plot average yearly inflation as a time series\", \"context\": null, \"role\": \"user\"}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { + "chunks": 
[ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "started" + }, + "tool_call": "", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " file\ndf = pd.read_csv(\"/var/folders/rb/qv", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "8vwgyj6yjd3t4pwsy9t", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "0rm0000gn/T/tmpk42nx919/quNW0bv", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + 
"delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "linflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "['Year'] = pd.to_datetime(df['Year'], format", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "='%Y')\n\n# Group by 'Year' and calculate", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " the average inflation\ndf_avg_inflation = df.groupby('", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "Year')['Inflation'].mean().reset_index()\n\n# Plot the average yearly", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " inflation as a time series\nplt.figure(figsize=(10", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + 
}, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": ",6))\nplt.plot(df_avg_inflation['Year'], df_avg_in", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "flation['Inflation'], marker='o')\nplt.title('Average Yearly", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": ".grid(True)\nplt.show()", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpk42nx919/quNW0bvlinflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group by 'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average yearly inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt.grid(True)\nplt.show()" + }, + "call_id": 
"dcc87d01-280c-4aef-9d8a-4295ddaff6c6", + "tool_name": { + "__enum__": "BuiltinTool", + "__module__": "llama_stack.models.llama.datatypes", + "value": "code_interpreter" + } + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 399 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 10 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 409 + } + ] + } + } + ], + "type": "generator" + }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"The error message indicates that the file \\\"\" does not exist. This could be due to a number of reasons, such as the file being deleted or moved, or the path being incorrect.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists at that location.\\n2. Check file permissions: Ensure that the user running the script has read permissions for the file.\\n3. 
Use a try-except block: You can use a try-except block to catch the FileNotFoundError and handle it accordingly.\\n\\nHere's an example of how you can modify the code to handle the error:\\n\\n```\\nimport pandas as pd\\n\\ntry:\\n df = pd.read_csv(\\\"\")\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nexcept FileNotFoundError:\\n print(\\\"The file does not exist\\\")\\n```\\n\\nThis code will print \\\"The file does not exist\\\" if the file is not found, instead of raising an error.\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Plot average yearly inflation as a time series\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\nimport matplotlib.pyplot as plt\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Convert the 'Year' column to datetime\\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\\n\\n# Group by 'Year' and calculate the average inflation\\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\\n\\n# Plot the average yearly inflation as a time series\\nplt.figure(figsize=(10,6))\\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\\nplt.title('Average Yearly Inflation')\\nplt.xlabel('Year')\\nplt.ylabel('Inflation')\\nplt.grid(True)\\nplt.show()\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": 
{\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " error message indicates that the file \"/var/folders/rb/qv8", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "vwgyj6yjd3t4pwsy9t0", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "rm0000gn/T/tmpaxhbg_k1/YkD5", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "cgQZinflation.csv\" does not exist. 
This could be due", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " to a number of reasons, such as the file", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " being deleted or moved, or the path being incorrect.\n\nTo resolve this issue", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ", you can try the following:\n\n1. Check the file path: Make", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " sure the file path is correct and the file exists at that location.\n2", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ". Check file permissions: Ensure that the user running the script has read permissions", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " for the file.\n3", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ". 
Use a try-except block: You can", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " use a try-except block to catch the FileNotFoundError", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " and handle it accordingly.\n\nHere's an example of how you can modify the", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " code to handle the error:\n\n```\nimport pandas as pd\nimport matplotlib", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ".pyplot as plt\n\ntry:\n df = pd.read_csv(\"/var", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "/folders/rb/qv8vwgyj6yjd3t", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "4pwsy9t0rm0000", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "gn/T/tmpaxhbg_k1/YkD5cgQZ", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + 
"__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "inflation.csv\")\n df['Year'] = pd.to_datetime(df['", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "Year'], format='%Y')\n df_avg_inflation = df.groupby('", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "Year')['Inflation'].mean().reset_index()\n plt.figure(figsize=(", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "10,6))\n plt.plot(df_avg_inflation['Year'], df", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "_avg_inflation['Inflation'], marker='o')\n plt.title('", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "Average Yearly Inflation')\n plt.xlabel('Year')\n plt.ylabel", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "('Inflation')\n plt.grid(True)\n plt.show()\nexcept FileNotFoundError", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ":\n print(\"The file does not exist\")\n```\n\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": 
"llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "This code will print \"The file does not exist\" if the file is", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " not found, instead of raising an error.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 745 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 391 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 1136 + } + ] + } + } + ], + "type": "generator" + }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}, {\"__module__\": \"llama_stack.apis.inference.inference\", 
\"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"The error message indicates that the file \\\"\" does not exist. This could be due to a number of reasons, such as the file being deleted or moved, or the path being incorrect.\\n\\nTo resolve this issue, you can try the following:\\n\\n1. Check the file path: Make sure the file path is correct and the file exists at that location.\\n2. Check file permissions: Ensure that the user running the script has read permissions for the file.\\n3. Use a try-except block: You can use a try-except block to catch the FileNotFoundError and handle it accordingly.\\n\\nHere's an example of how you can modify the code to handle the error:\\n\\n```\\nimport pandas as pd\\n\\ntry:\\n df = pd.read_csv(\\\"\")\\n print(df.head())\\n print(df.info())\\n print(df.describe())\\nexcept FileNotFoundError:\\n print(\\\"The file does not exist\\\")\\n```\\n\\nThis code will print \\\"The file does not exist\\\" if the file is not found, instead of raising an error.\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Plot average yearly inflation as a time series\", \"context\": null, \"role\": \"user\"}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "started" + }, + "tool_call": "", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + 
"stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " file\ndf = pd.read_csv(\"/var/folders/rb/qv", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "8vwgyj6yjd3t4", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "pwsy9t0rm0000gn", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "/T/tmpaxhbg_k1/YkD5cgQZin", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "flation.csv\")\n\n# Convert the 'Year' column", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": 
"llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " to datetime\ndf['Year'] = pd.to_datetime(df['Year'],", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " format='%Y')\n\n# Group by", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " 'Year' and calculate the average inflation\ndf_avg_in", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "flation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "# Plot the average yearly inflation as a time series\n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": 
"plt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "Year'], df_avg_inflation['Inflation'], marker='o')\nplt", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": ".title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": "('Inflation')\nplt.grid(True)\nplt.show()", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "code": "import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpaxhbg_k1/YkD5cgQZinflation.csv\")\n\n# Convert the 'Year' column to datetime\ndf['Year'] = pd.to_datetime(df['Year'], format='%Y')\n\n# Group by 'Year' and calculate the average inflation\ndf_avg_inflation = df.groupby('Year')['Inflation'].mean().reset_index()\n\n# Plot the average yearly inflation as a time series\nplt.figure(figsize=(10,6))\nplt.plot(df_avg_inflation['Year'], df_avg_inflation['Inflation'], marker='o')\nplt.title('Average Yearly Inflation')\nplt.xlabel('Year')\nplt.ylabel('Inflation')\nplt.grid(True)\nplt.show()" + }, + "call_id": "947831c7-8279-4f5b-8cfd-c30697382127", + "tool_name": { + "__enum__": "BuiltinTool", + "__module__": "llama_stack.models.llama.datatypes", + "value": "code_interpreter" + } + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + 
"value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 496 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 10 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 506 + } + ] + } + } + ], + "type": "generator" + }, "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"# User provided a file accessible to you at \\\"\"\\nYou can use code_interpreter to load and inspect it.\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics about the dataframe\\nprint(df.describe())\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": 
\"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { "chunks": [ { @@ -44822,6 +46757,551 @@ ], "type": "generator" }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"# User provided a file accessible to you at \\\"\"\\nYou can use code_interpreter to load and inspect it.\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"error\\n[stdout]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stdout]\\n[stderr]\\n[Errno 2] No such file or directory: 'bwrap'\\n[/stderr]\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": 
\"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "The", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " error message indicates that the file \"/var/folders/rb/qv8", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "vwgyj6yjd3t4pwsy9t0", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "rm0000gn/T/tmpaxhbg_k1/YkD5", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "cgQZinflation.csv\" does not exist. 
This could be", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " due to a number of reasons, such as the file being deleted or moved", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ", or the path being incorrect.\n\nTo resolve this", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " issue, you can try the following:\n\n1. Check the file path:", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " Make sure the file path is correct and the file exists at that", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " location.\n2. Check file permissions", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ": Ensure that the user running the script has read permissions", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " for the file.\n3. 
Use a", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " try-except block: You can use a try-except", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " block to catch the FileNotFoundError and handle it accordingly.\n\nHere's an", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " example of how you can modify the code to handle the error", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": ":\n\n```\nimport pandas as pd\n\ntry:\n", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " df = pd.read_csv(\"/var/folders/rb/q", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "v8vwgyj6yjd3t4pwsy9", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "t0rm0000gn/T/tmpaxhbg_k1/Yk", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": 
"ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "D5cgQZinflation.csv\")\n print(df.head())\n print(df.info())\n ", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " print(df.describe())\nexcept FileNotFoundError:\n print(\"The file does not exist", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "\")\n```\n\nThis code will print \"", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "The file does not exist\" if the file is not", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " found, instead of raising an error.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 193 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 301 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 494 + } + ] + } + } + ], + "type": "generator" + }, "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Here is a csv, can you describe it?\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": 
\"\", \"content\": [{\"text\": \"# User provided a file accessible to you at \\\"\"\\nYou can use code_interpreter to load and inspect it.\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Execute code\", \"parameters\": {\"code\": {\"default\": null, \"description\": \"The code to execute\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"code_interpreter\"}}}]}]": { "chunks": [ { @@ -44955,7 +47435,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "/tmp2x_sml66/9vYvmVRoinflation.csv", + "tool_call": "/tmpaxhbg_k1/YkD5cgQZinflation", "type": "tool_call" }, "event_type": { @@ -44980,7 +47460,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "\")\n\n# Print the first few rows of the dataframe\nprint(df.head())\n\n", + "tool_call": ".csv\")\n\n# Print the first few rows of the dataframe\nprint(df.head", "type": "tool_call" }, "event_type": { @@ -45005,7 +47485,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "# Print information about the dataframe\nprint(df", + "tool_call": "())\n\n# Print information about", "type": "tool_call" }, "event_type": { @@ -45030,7 +47510,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": ".info())\n\n# Print summary statistics about the dataframe\nprint(df.describe", + "tool_call": " the dataframe\nprint(df", "type": "tool_call" }, "event_type": { @@ -45055,7 +47535,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "())", + "tool_call": ".info())\n\n# Print summary statistics of the dataframe\nprint(df.describe())", "type": "tool_call" }, "event_type": { @@ -45082,9 +47562,9 @@ }, "tool_call": { "arguments": { - "code": "import pandas as pd\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmp2x_sml66/9vYvmVRoinflation.csv\")\n\n# Print the first few rows of the dataframe\nprint(df.head())\n\n# Print information about the dataframe\nprint(df.info())\n\n# Print summary statistics about the dataframe\nprint(df.describe())" + "code": "import pandas as pd\n\n# Load the CSV file\ndf = pd.read_csv(\"/var/folders/rb/qv8vwgyj6yjd3t4pwsy9t0rm0000gn/T/tmpaxhbg_k1/YkD5cgQZinflation.csv\")\n\n# Print the first few rows of the 
dataframe\nprint(df.head())\n\n# Print information about the dataframe\nprint(df.info())\n\n# Print summary statistics of the dataframe\nprint(df.describe())" }, - "call_id": "5bbfebeb-4360-4ef9-a9e2-4227a8e8c699", + "call_id": "ae5e788f-1422-4f12-b465-a29043709fbb", "tool_name": { "__enum__": "BuiltinTool", "__module__": "llama_stack.models.llama.datatypes", @@ -45131,54 +47611,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "AyEX3So6", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:17.873486+00:00", - "__module__": "datetime" - }, - "trace_id": "kNsljyzfQV2Cn4aZ", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 36 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "AyEX3So6", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:17.873500+00:00", - "__module__": "datetime" - }, - "trace_id": "kNsljyzfQV2Cn4aZ", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 10 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "AyEX3So6", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:17.873503+00:00", - "__module__": "datetime" - }, - "trace_id": "kNsljyzfQV2Cn4aZ", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 46 } ] @@ -45187,6 +47631,936 @@ ], "type": "generator" }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"I am attaching some documentation for Torchtune. Help me answer questions I will ask next.\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"Torchtune documentation\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:3e31d\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. 
code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:14c27\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. 
_lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:90a49\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"I'm ready to help. What's your question about Torchtune?\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Tell me how to use LoRA\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"using LoRA in Torchtune\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:14c27\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet's inspect each of these models a bit more closely.\\n\\n.. 
code-block:: bash\\n\\n # Print the first layer's self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n\", \"type\": \"text\"}, {\"text\": \"Result 3:\\nDocument_id:14c27\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:14c27\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. 
code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\\\"\\\"\\\"\\n {total_params} total params,\\n {trainable_params}\\\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \\\"\\\"\\\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe \", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:3e31d\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. 
Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:14c27\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. 
note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:90a49\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"I'm ready to help. 
What's your question about Torchtune?\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Tell me how to use LoRA\", \"context\": null, \"role\": \"user\"}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Search for information in a database.\", \"parameters\": {\"query\": {\"default\": null, \"description\": \"The query to search for. Can be a natural language sentence or keywords.\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": \"knowledge_search\"}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "[k", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "nowledge_search(query=\"using LoRA in Torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "\")]", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": 
"llama_stack.apis.common.content_types", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "query": "using LoRA in Torchtune" + }, + "call_id": "0e2bf2f6-0915-4d44-ad3b-6fd48d3df717", + "tool_name": "knowledge_search" + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 107 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 23 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 130 + } + ] + } + } + ], + "type": "generator" + }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"I am attaching some documentation for Torchtune. Help me answer questions I will ask next.\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"Torchtune documentation\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:3e31d\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. 
code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:14c27\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. 
code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:90a49\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Search for information in a database.\", \"parameters\": {\"query\": {\"default\": null, \"description\": \"The query to search for. Can be a natural language sentence or keywords.\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": \"knowledge_search\"}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "'m ready to help. 
What's", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " your question about Torchtune?", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 75 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 25 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 100 + } + ] + } + } + ], + "type": "generator" + }, "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"I am attaching some documentation for Torchtune. Help me answer questions I will ask next.\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"Torchtune documentation\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:42933\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. 
code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:20e5d\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. 
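A minimal sketch of the LoRA idea described in the excerpt above, assuming plain PyTorch: a frozen base projection plus a trainable low-rank update scaled by alpha/rank. This `TinyLoRALinear` is a hypothetical illustration of the technique the prose names, not torchtune's `LoRALinear`.

.. code-block:: python

    import torch
    import torch.nn as nn

    class TinyLoRALinear(nn.Module):
        """Frozen base weight plus a trainable low-rank update (illustrative sketch)."""

        def __init__(self, in_features: int, out_features: int, rank: int = 8, alpha: int = 16):
            super().__init__()
            self.base = nn.Linear(in_features, out_features, bias=False)
            self.base.weight.requires_grad = False            # freeze the pretrained weight
            self.lora_a = nn.Linear(in_features, rank, bias=False)   # A: down-project to rank r
            self.lora_b = nn.Linear(rank, out_features, bias=False)  # B: up-project back
            nn.init.zeros_(self.lora_b.weight)                # update starts as a no-op
            self.scaling = alpha / rank

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # W x + (alpha / r) * B(A x)
            return self.base(x) + self.scaling * self.lora_b(self.lora_a(x))

Only `lora_a` and `lora_b` carry gradients here, which is the source of the memory savings the excerpt attributes to LoRA.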
Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:0cd43\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"I'm ready to help. What's your question about Torchtune?\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Tell me how to use LoRA\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"using LoRA in Torchtune\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:20e5d\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet's inspect each of these models a bit more closely.\\n\\n.. 
code-block:: bash\\n\\n # Print the first layer's self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n\", \"type\": \"text\"}, {\"text\": \"Result 3:\\nDocument_id:20e5d\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:20e5d\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. 
code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\\\"\\\"\\\"\\n {total_params} total params,\\n {trainable_params}\\\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \\\"\\\"\\\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe \", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:57768\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. 
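The note above states that any `load_dataset` keyword argument is forwarded through torchtune's dataset builders. A small sketch under that assumption; the tokenizer path and the split slice are placeholders, not values from the recorded docs.

.. code-block:: python

    from torchtune.datasets import chat_dataset
    from torchtune.models.llama3 import llama3_tokenizer

    # Placeholder path; substitute your own tokenizer checkpoint.
    tokenizer = llama3_tokenizer("/path/to/tokenizer.model")

    # `split` is forwarded to Hugging Face `load_dataset`, so its slice
    # syntax such as "train[:95%]" should work through chat_dataset too.
    ds = chat_dataset(
        tokenizer=tokenizer,
        source="json",
        data_files="data/my_data.json",
        split="train[:95%]",
        conversation_column="dialogue",
        conversation_style="sharegpt",
    )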
Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:4fd22\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. 
note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:6e83e\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"I'm ready to help. 
What's your question about Torchtune?\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Tell me how to use LoRA\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"using LoRA in Torchtune\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:4fd22\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet's inspect each of these models a bit more closely.\\n\\n.. 
code-block:: bash\\n\\n # Print the first layer's self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n\", \"type\": \"text\"}, {\"text\": \"Result 3:\\nDocument_id:4fd22\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:4fd22\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. 
code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\\\"\\\"\\\"\\n {total_params} total params,\\n {trainable_params}\\\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \\\"\\\"\\\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe \", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:57768\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. 
Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:4fd22\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. 
note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:6e83e\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"I'm ready to help. 
What's your question about Torchtune?\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Tell me how to use LoRA\", \"context\": null, \"role\": \"user\"}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Search for information in a database.\", \"parameters\": {\"query\": {\"default\": null, \"description\": \"The query to search for. Can be a natural language sentence or keywords.\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": \"knowledge_search\"}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "[k", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "nowledge_search(query=\"using LoRA in Torchtune", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "\")]", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": 
"llama_stack.apis.common.content_types", + "value": "succeeded" + }, + "tool_call": { + "arguments": { + "query": "using LoRA in Torchtune" + }, + "call_id": "b21a18ba-4bb8-4f64-8ae9-acb263ba0654", + "tool_name": "knowledge_search" + }, + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 107 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 23 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 130 + } + ] + } + } + ], + "type": "generator" + }, + "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"I am attaching some documentation for Torchtune. Help me answer questions I will ask next.\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"Torchtune documentation\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:57768\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. 
code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:4fd22\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. 
code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:6e83e\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Search for information in a database.\", \"parameters\": {\"query\": {\"default\": null, \"description\": \"The query to search for. Can be a natural language sentence or keywords.\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": \"knowledge_search\"}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "I", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "'m ready to help. 
What's", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " your question about Torchtune?", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 75 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 25 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 100 + } + ] + } + } + ], + "type": "generator" + }, "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"I am attaching some documentation for Torchtune. Help me answer questions I will ask next.\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"Torchtune documentation\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:8106c\\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\\n``conversation_column`` and ``conversation_style``. Our data follows the ``\\\"sharegpt\\\"`` format, so\\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\\nlook like so:\\n\\n.. 
code-block:: python\\n\\n from torchtune.datasets import chat_dataset\\n from torchtune.models.llama3 import llama3_tokenizer\\n\\n tokenizer = llama3_tokenizer(\\\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\\")\\n ds = chat_dataset(\\n tokenizer=tokenizer,\\n source=\\\"json\\\",\\n data_files=\\\"data/my_data.json\\\",\\n split=\\\"train\\\",\\n conversation_column=\\\"dialogue\\\",\\n conversation_style=\\\"sharegpt\\\",\\n )\\n\\n.. code-block:: yaml\\n\\n # In config\\n tokenizer:\\n _component_: torchtune.models.llama3.llama3_tokenizer\\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\\n\\n dataset:\\n _component_: torchtune.datasets.chat_dataset\\n source: json\\n data_files: data/my_data.json\\n split: train\\n conversation_column: dialogue\\n conversation_style: sharegpt\\n\\n.. note::\\n You can pass in any keyword argument for `load_dataset `_ into all our\\n Dataset classes and they will honor them. This is useful for common parameters\\n such as specifying the data split with :code:`split` or configuration with\\n :code:`name`\\n\\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW `.\\n.. .. _glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:a03f3\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. 
Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 5:\\nDocument_id:0719d\\nContent: etune\\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.use_dora=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n use_dora: True\\n\\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\\neven more memory savings!\\n\\n.. code-block:: bash\\n\\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\\\\n model.apply_lora_to_mlp=True \\\\\\n model.lora_attn_modules=[\\\"q_proj\\\",\\\"k_proj\\\",\\\"v_proj\\\"] \\\\\\n model.lora_rank=16 \\\\\\n model.lora_alpha=32 \\\\\\n model.use_dora=True \\\\\\n model.quantize_base=True\\n\\n.. code-block:: yaml\\n\\n model:\\n _component_: torchtune.models.lora_llama3_8b\\n apply_lora_to_mlp: True\\n lora_attn_modules: [\\\"q_proj\\\", \\\"k_proj\\\", \\\"v_proj\\\"]\\n lora_rank: 16\\n lora_alpha: 32\\n use_dora: True\\n quantize_base: True\\n\\n\\n.. note::\\n\\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\\n\\n.. _glossary_distrib:\\n\\n\\n.. TODO\\n\\n.. Distributed\\n.. -----------\\n\\n.. .. _glossary_fsdp:\\n\\n.. Fully Sharded Data Parallel (FSDP)\\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\\n\\n.. All our ``_distributed`` recipes use `FSDP `.\\n.. .. 
_glossary_fsdp2:\\n\\n\", \"type\": \"text\"}, {\"text\": \"END of knowledge_search tool results.\\n\", \"type\": \"text\"}], \"role\": \"tool\", \"tool_name\": \"knowledge_search\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"I'm ready to help. What's your first question about Torchtune?\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": []}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Tell me how to use LoRA\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"using LoRA in Torchtune\"}, \"call_id\": \"\", \"tool_name\": \"knowledge_search\"}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": [{\"text\": \"knowledge_search tool found 5 chunks:\\nBEGIN of knowledge_search tool results.\\n\", \"type\": \"text\"}, {\"text\": \"Result 1:\\nDocument_id:a03f3\\nContent: .. _lora_finetune_label:\\n\\n============================\\nFine-Tuning Llama2 with LoRA\\n============================\\n\\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\\nIf you already know what LoRA is and want to get straight to running\\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\\n\\n.. grid:: 2\\n\\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\\n\\n * What LoRA is and how it saves memory during finetuning\\n * An overview of LoRA components in torchtune\\n * How to run a LoRA finetune using torchtune\\n * How to experiment with different LoRA configurations\\n\\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\\n\\n * Be familiar with :ref:`torchtune`\\n * Make sure to :ref:`install torchtune`\\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\\n\\nWhat is LoRA?\\n-------------\\n\\n`LoRA `_ is an adapter-based method for\\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\\ntransformer models, in which case it is common to add the low-rank matrices\\nto some of the linear projections in each transformer layer's self-attention.\\n\\n.. note::\\n\\n If you're unfamiliar, check out these references for the `definition of rank `_\\n and discussion of `low-rank approximations `_.\\n\\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\\nyou can expect to see memory savings due to a substantial reduction in the\\nnumber of parameters with gradients. When using an optimizer with momentum,\\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\\n See :ref:`below` for how to do this.\\n\\nLet's inspect each of these models a bit more closely.\\n\\n.. 
code-block:: bash\\n\\n # Print the first layer's self-attention in the usual Llama2 model\\n >>> print(base_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\\n (pos_embeddings): RotaryPositionalEmbeddings()\\n )\\n\\n # Print the same for Llama2 with LoRA weights\\n >>> print(lora_model.layers[0].attn)\\n MultiHeadAttention(\\n (q_proj): LoRALinear(\\n (dropout): Dropout(p=0.0, inplace=False)\\n \\n\", \"type\": \"text\"}, {\"text\": \"Result 3:\\nDocument_id:a03f3\\nContent: 06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\\n\\n.. code-block:: bash\\n\\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\\n\\n.. note::\\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\\n or by directly modifying the :code:`7B_lora.yaml` file. See our \\\"\\\":ref:`config_tutorial_label`\\\" recipe\\n for more details on how you can easily clone and modify torchtune configs.\\n\\n.. note::\\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\\n and (b) the memory constraints of your hardware.\\n\\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\\n\\n.. code-block:: yaml\\n\\n # Model Arguments\\n model:\\n _component_: lora_llama2_7b\\n lora_attn_modules: ['q_proj', 'v_proj']\\n lora_rank: 8\\n lora_alpha: 16\\n ...\\n\\nWe see that the\\n\", \"type\": \"text\"}, {\"text\": \"Result 4:\\nDocument_id:a03f3\\nContent: from our Llama2\\nmodel without any wrappers or custom checkpoint conversion logic.\\n\\n.. code-block:: python\\n\\n # Assuming that base_model already has the pretrained Llama2 weights,\\n # this will directly load them into your LoRA model without any conversion necessary.\\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\\n\\n.. note::\\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\\n :func:`validate_missing_and_unexpected_for_lora() `.\\n\\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\\n\\n.. _setting_trainable_params:\\n\\n.. 
code-block:: python\\n\\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\\n\\n # Fetch all params from the model that are associated with LoRA.\\n lora_params = get_adapter_params(lora_model)\\n\\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\\n set_trainable_params(lora_model, lora_params)\\n\\n # Print the total number of parameters\\n total_params = sum([p.numel() for p in lora_model.parameters()])\\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\\n print(\\n f\\\"\\\"\\\"\\n {total_params} total params,\\n {trainable_params}\\\" trainable params,\\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\\n \\\"\\\"\\\"\\n )\\n\\n 6742609920 total params,\\n 4194304 trainable params,\\n 0.06% of all params are trainable.\\n\\n.. note::\\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\\n of in the recipe.\\n\\n\\n.. _lora_recipe_label:\\n\\nLoRA finetuning recipe in torchtune\\n-----------------------------------\\n\\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe \", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"brave_search\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"{\\\"query\\\": \\\"current CEO of Meta\\\", \\\"top_k\\\": [{\\\"title\\\": \\\"Executives - Meta\\\", \\\"url\\\": \\\"https://about.meta.com/media-gallery/executives/\\\", \\\"content\\\": \\\"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\\\u2018Boz\\\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\\\", \\\"score\\\": 0.8190992, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\\\", \\\"url\\\": \\\"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\\\", \\\"content\\\": \\\"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. 
In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\\\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\\\u00a9 2025 Meta\\\", \\\"score\\\": 0.79080546, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Meet the Executive CSuite Team of Meta (Facebook) [2025]\\\", \\\"url\\\": \\\"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\\\", \\\"content\\\": \\\"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\\\u2019s finance and facilities team to keep track of the company\\\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. Meta\\\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\\\", \\\"score\\\": 0.7602419, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Meta - Leadership & Governance\\\", \\\"url\\\": \\\"https://investor.atmeta.com/leadership-and-governance/\\\", \\\"content\\\": \\\"Mr. Andreessen was a co-founder of Netscape Communications Corporation, a software company, serving in various positions, including Chief Technology Officer and Executive Vice President of Products. Ms. Killefer also served as Assistant Secretary for Management, Chief Financial Officer, and Chief Operating Officer of the U.S. Department of the Treasury from 1997 to 2000 and as a member of the IRS Oversight Board from 2000 to 2005, including as Chair of the IRS Oversight Board from 2002 to 2004. Ms. 
Travis has served as Executive Vice President and Chief Financial Officer of The Estee Lauder Companies Inc., a global manufacturer and marketer of skin care, makeup, fragrance and hair care products, since August 2012.\\\", \\\"score\\\": 0.6175132, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Mark Zuckerberg - Wikipedia\\\", \\\"url\\\": \\\"https://en.wikipedia.org/wiki/Mark_Zuckerberg\\\", \\\"content\\\": \\\"They began dating in 2003.[175] In September 2010, Chan, who was a medical student at the University of California, San Francisco at the time,[176] moved into his rented house in Palo Alto, California.[177][178] They married on May 19, 2012, in the grounds of his mansion in an event that also celebrated her graduation from medical school.[179][180] Zuckerberg revealed in July 2015 that they were expecting a baby girl and that Chan had previously experienced three miscarriages.[181] Their first daughter was born in December 2015.[182] They announced in a Chinese New Year video that their daughter's Chinese name is Chen Mingyu (Chinese: \\\\u9648\\\\u660e\\\\u5b87).[183] Their second daughter was born in August 2017.[184] Zuckerberg and his wife welcomed their third daughter in March 2023 and announced the news across his social media pages.[185] The couple also have a Puli dog named Beast,[186] who has over two million followers on Facebook.[187] Zuckerberg commissioned the visual artist Daniel Arsham to build a 7-foot-tall sculpture of his wife, which was unveiled in 2024.[188]\\\", \\\"score\\\": 0.05570498, \\\"raw_content\\\": null}]}\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"brave_search\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Search the web for information\", \"parameters\": {\"query\": {\"default\": null, \"description\": \"The query to search for\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"brave_search\"}}}]}]": { + "chunks": [ + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "start" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "The", + 
"type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": " current CEO of Meta is Mark Zuckerberg.", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "complete" + }, + "logprobs": null, + "stop_reason": { + "__enum__": "StopReason", + "__module__": "llama_stack.models.llama.datatypes", + "value": "end_of_turn" + } + }, + "metrics": [ + { + "metric": "prompt_tokens", + "unit": null, + "value": 1235 + }, + { + "metric": "completion_tokens", + "unit": null, + "value": 19 + }, + { + "metric": "total_tokens", + "unit": null, + "value": 1254 + } + ] + } + } + ], + "type": "generator" + }, "[[\"meta-llama/Llama-3.3-70B-Instruct\", [{\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"SystemMessage\", \"data\": {\"content\": \"You are a helpful assistant\", \"role\": \"system\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"UserMessage\", \"data\": {\"content\": \"Search the web and tell me who the current CEO of Meta is.\", \"context\": null, \"role\": \"user\"}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"CompletionMessage\", \"data\": {\"content\": \"\", \"role\": \"assistant\", \"stop_reason\": {\"__enum__\": \"StopReason\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"end_of_turn\"}, \"tool_calls\": [{\"arguments\": {\"query\": \"current CEO of Meta\"}, \"call_id\": \"\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"brave_search\"}}]}}, {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolResponseMessage\", \"data\": {\"call_id\": \"\", \"content\": \"{\\\"query\\\": \\\"current CEO of Meta\\\", \\\"top_k\\\": [{\\\"title\\\": \\\"Meta - Leadership & Governance\\\", \\\"url\\\": \\\"https://investor.atmeta.com/leadership-and-governance/\\\", \\\"content\\\": \\\"Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. Mark is responsible for setting the overall direction and product strategy for the company. He leads the design of Meta's services and development of its core technology and infrastructure. 
Mark studied computer science at Harvard\\\", \\\"score\\\": 0.8342047, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Executives - Meta\\\", \\\"url\\\": \\\"https://about.meta.com/media-gallery/executives/\\\", \\\"content\\\": \\\"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\\\u2018Boz\\\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\\\", \\\"score\\\": 0.8190992, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\\\", \\\"url\\\": \\\"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\\\", \\\"content\\\": \\\"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\\\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\\\u00a9 2025 Meta\\\", \\\"score\\\": 0.79099923, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Meet the Executive CSuite Team of Meta (Facebook) [2025]\\\", \\\"url\\\": \\\"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\\\", \\\"content\\\": \\\"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\\\u2019s finance and facilities team to keep track of the company\\\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. 
Meta\\\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\\\", \\\"score\\\": 0.7602419, \\\"raw_content\\\": null}, {\\\"title\\\": \\\"Mark Zuckerberg - Wikipedia\\\", \\\"url\\\": \\\"https://en.wikipedia.org/wiki/Mark_Zuckerberg\\\", \\\"content\\\": \\\"They began dating in 2003.[175] In September 2010, Chan, who was a medical student at the University of California, San Francisco at the time,[176] moved into his rented house in Palo Alto, California.[177][178] They married on May 19, 2012, in the grounds of his mansion in an event that also celebrated her graduation from medical school.[179][180] Zuckerberg revealed in July 2015 that they were expecting a baby girl and that Chan had previously experienced three miscarriages.[181] Their first daughter was born in December 2015.[182] They announced in a Chinese New Year video that their daughter's Chinese name is Chen Mingyu (Chinese: \\\\u9648\\\\u660e\\\\u5b87).[183] Their second daughter was born in August 2017.[184] Zuckerberg and his wife welcomed their third daughter in March 2023 and announced the news across his social media pages.[185] The couple also have a Puli dog named Beast,[186] who has over two million followers on Facebook.[187] Zuckerberg commissioned the visual artist Daniel Arsham to build a 7-foot-tall sculpture of his wife, which was unveiled in 2024.[188]\\\", \\\"score\\\": 0.05564338, \\\"raw_content\\\": null}]}\", \"role\": \"tool\", \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"brave_search\"}}}]], {\"response_format\": null, \"sampling_params\": {\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"SamplingParams\", \"data\": {\"max_tokens\": 0, \"repetition_penalty\": 1.0, \"strategy\": {\"temperature\": 0.0001, \"top_p\": 0.9, \"type\": \"top_p\"}}}, \"stream\": true, \"tool_config\": {\"__module__\": \"llama_stack.apis.inference.inference\", \"__pydantic__\": \"ToolConfig\", \"data\": {\"system_message_behavior\": {\"__enum__\": \"SystemMessageBehavior\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"append\"}, \"tool_choice\": {\"__enum__\": \"ToolChoice\", \"__module__\": \"llama_stack.apis.inference.inference\", \"value\": \"auto\"}, \"tool_prompt_format\": null}}, \"tool_prompt_format\": null, \"tools\": [{\"__module__\": \"llama_stack.models.llama.datatypes\", \"__pydantic__\": \"ToolDefinition\", \"data\": {\"description\": \"Search the web for information\", \"parameters\": {\"query\": {\"default\": null, \"description\": \"The query to search for\", \"param_type\": \"string\", \"required\": true}}, \"tool_name\": {\"__enum__\": \"BuiltinTool\", \"__module__\": \"llama_stack.models.llama.datatypes\", \"value\": \"brave_search\"}}}]}]": { "chunks": [ { @@ -48301,7 +52470,7 @@ "arguments": { "query": "current CEO of Meta" }, - "call_id": "535c272b-768b-44fe-b303-2eae022f67f5", + "call_id": "94b2529c-a7f4-43ca-ab5c-92485d5cc6f3", "tool_name": { "__enum__": "BuiltinTool", "__module__": "llama_stack.models.llama.datatypes", @@ -48348,54 +52517,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "AZ60Ocso", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:03.907918+00:00", - "__module__": "datetime" - }, - "trace_id": "hwA8OLUhQ1qa3ecF", - 
"type": "metric", - "unit": "tokens", + "unit": null, "value": 34 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "AZ60Ocso", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:03.907933+00:00", - "__module__": "datetime" - }, - "trace_id": "hwA8OLUhQ1qa3ecF", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 10 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "AZ60Ocso", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:03.907936+00:00", - "__module__": "datetime" - }, - "trace_id": "hwA8OLUhQ1qa3ecF", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 44 } ] @@ -48452,27 +52585,7 @@ "data": { "event": { "delta": { - "text": " boiling point of polyjuice is -100 degrees Celsius", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": ".", + "text": " boiling point of polyjuice is -100 degrees Celsius.", "type": "text" }, "event_type": { @@ -48509,54 +52622,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "drZjZkfj", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:33.852666+00:00", - "__module__": "datetime" - }, - "trace_id": "Sn0I7GFHTxKxewK2", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 77 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "drZjZkfj", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:33.852692+00:00", - "__module__": "datetime" - }, - "trace_id": "Sn0I7GFHTxKxewK2", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 23 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "drZjZkfj", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:33.852699+00:00", - "__module__": "datetime" - }, - "trace_id": "Sn0I7GFHTxKxewK2", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 100 } ] @@ -48650,54 +52727,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "WMEZtUXH", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:00:32.617998+00:00", - "__module__": "datetime" - }, - "trace_id": "f9RM1qaUTk2LvaVo", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 77 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "WMEZtUXH", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:00:32.618030+00:00", - "__module__": "datetime" - }, - "trace_id": "f9RM1qaUTk2LvaVo", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 23 }, { - 
"attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "WMEZtUXH", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:00:32.618036+00:00", - "__module__": "datetime" - }, - "trace_id": "f9RM1qaUTk2LvaVo", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 100 } ] @@ -48754,167 +52795,7 @@ "data": { "event": { "delta": { - "text": " function get_boiling_point is not", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " able", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " to find the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " boiling point of \"polyjuice\" as", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " it", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " is not a real liquid", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": ". 
Polyju", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": "ice is a fictional substance from the", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " Harry Potter series.", + "text": " boiling point of polyjuice is -100 degrees Celsius.", "type": "text" }, "event_type": { @@ -48951,55 +52832,19 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "p7Vx9VAq", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:28.232189+00:00", - "__module__": "datetime" - }, - "trace_id": "WKEqFugATCeCl8mc", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 77 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "p7Vx9VAq", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:28.232325+00:00", - "__module__": "datetime" - }, - "trace_id": "WKEqFugATCeCl8mc", - "type": "metric", - "unit": "tokens", - "value": 51 + "unit": null, + "value": 23 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "p7Vx9VAq", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:28.232334+00:00", - "__module__": "datetime" - }, - "trace_id": "WKEqFugATCeCl8mc", - "type": "metric", - "unit": "tokens", - "value": 128 + "unit": null, + "value": 100 } ] } @@ -50118,7 +53963,7 @@ "data": { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice", + "text": "get_boiling_point(liquid_name='polyjuice', cel", "type": "text" }, "event_type": { @@ -50138,7 +53983,7 @@ "data": { "event": { "delta": { - "text": "', celcius=True)]", + "text": "cius=True)]", "type": "text" }, "event_type": { @@ -50168,7 +54013,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "d43b2636-903d-430d-8389-91eefe5a1d75", + "call_id": "20aad753-f01f-42cc-bb68-aac292a707a1", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -50211,54 +54056,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "9EBiVeAT", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:32.221646+00:00", - "__module__": "datetime" - }, - "trace_id": "7kB12OwpSUOcwmJV", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 30 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "9EBiVeAT", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:32.221673+00:00", - "__module__": "datetime" 
- }, - "trace_id": "7kB12OwpSUOcwmJV", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 28 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "9EBiVeAT", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:32.221680+00:00", - "__module__": "datetime" - }, - "trace_id": "7kB12OwpSUOcwmJV", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 58 } ] @@ -50365,7 +54174,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "0548b2ef-daa4-4099-bb2c-b34f00752339", + "call_id": "d690dafc-3fe8-4356-ba40-5682803c9fbf", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -50408,54 +54217,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "lc3YWIQH", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:00:31.366139+00:00", - "__module__": "datetime" - }, - "trace_id": "zDQV0rn3TNKfByA0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 30 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "lc3YWIQH", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:00:31.366166+00:00", - "__module__": "datetime" - }, - "trace_id": "zDQV0rn3TNKfByA0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 28 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "lc3YWIQH", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:00:31.366172+00:00", - "__module__": "datetime" - }, - "trace_id": "zDQV0rn3TNKfByA0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 58 } ] @@ -50512,7 +54285,7 @@ "data": { "event": { "delta": { - "text": "juice is a fictional potion from", + "text": "juice is a fictional potion from the Harry Potter series by", "type": "text" }, "event_type": { @@ -50532,7 +54305,7 @@ "data": { "event": { "delta": { - "text": " the Harry Potter series by J.K. Rowling. As it", + "text": " J.K. Rowling. As it's not a", "type": "text" }, "event_type": { @@ -50552,7 +54325,7 @@ "data": { "event": { "delta": { - "text": "'s not a real substance, it doesn't have a boiling point", + "text": " real substance, it doesn't have a boiling point. Polyjuice Potion is", "type": "text" }, "event_type": { @@ -50572,7 +54345,7 @@ "data": { "event": { "delta": { - "text": ". 
Polyjuice Potion is a magical concoction", + "text": " a magical concoction that allows the drinker to assume the form and", "type": "text" }, "event_type": { @@ -50592,7 +54365,7 @@ "data": { "event": { "delta": { - "text": " that allows the drinker to assume the form and", + "text": " appearance of another person, but it's not a physical substance that can be measured or analyzed in", "type": "text" }, "event_type": { @@ -50612,47 +54385,7 @@ "data": { "event": { "delta": { - "text": " appearance", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " of another person, but it's not a physical substance that can", - "type": "text" - }, - "event_type": { - "__enum__": "ChatCompletionResponseEventType", - "__module__": "llama_stack.apis.inference.inference", - "value": "progress" - }, - "logprobs": null, - "stop_reason": null - }, - "metrics": null - } - }, - { - "__module__": "llama_stack.apis.inference.inference", - "__pydantic__": "ChatCompletionResponseStreamChunk", - "data": { - "event": { - "delta": { - "text": " be measured or analyzed in the same way as real-world", + "text": " the same way as real-world", "type": "text" }, "event_type": { @@ -50692,7 +54425,7 @@ "data": { "event": { "delta": { - "text": " have any other questions or", + "text": " have any other questions or if there's anything else I can", "type": "text" }, "event_type": { @@ -50712,7 +54445,7 @@ "data": { "event": { "delta": { - "text": " if there's anything else I can help you with, feel free to ask", + "text": " help you with", "type": "text" }, "event_type": { @@ -50732,7 +54465,7 @@ "data": { "event": { "delta": { - "text": "!", + "text": ", feel free to ask!", "type": "text" }, "event_type": { @@ -50769,54 +54502,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "M0oC9v8Y", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:30.531648+00:00", - "__module__": "datetime" - }, - "trace_id": "0CMlh2kQShSVm3zE", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 30 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "M0oC9v8Y", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:30.531666+00:00", - "__module__": "datetime" - }, - "trace_id": "0CMlh2kQShSVm3zE", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 113 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "M0oC9v8Y", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:30.531671+00:00", - "__module__": "datetime" - }, - "trace_id": "0CMlh2kQShSVm3zE", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 143 } ] @@ -50873,7 +54570,7 @@ "data": { "event": { "delta": { - "text": "get_boiling_point(liquid_name='polyjuice', cel", + "text": "get_boiling_point(liquid_name='polyjuice", "type": "text" }, "event_type": { @@ -50893,7 +54590,7 @@ "data": { "event": { "delta": { - "text": 
"cius=True)]", + "text": "', celcius=True)]", "type": "text" }, "event_type": { @@ -50923,7 +54620,7 @@ "celcius": true, "liquid_name": "polyjuice" }, - "call_id": "acbb04a1-08f4-4277-9b66-aadda2fa2be7", + "call_id": "8671dbc5-2f70-48a3-a844-94ad7652a468", "tool_name": "get_boiling_point" }, "type": "tool_call" @@ -50966,54 +54663,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "jMXDDKvp", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:26.175063+00:00", - "__module__": "datetime" - }, - "trace_id": "44TwzIrGS2aqfbVn", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 30 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "jMXDDKvp", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:26.175128+00:00", - "__module__": "datetime" - }, - "trace_id": "44TwzIrGS2aqfbVn", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 28 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "jMXDDKvp", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T02:04:26.175137+00:00", - "__module__": "datetime" - }, - "trace_id": "44TwzIrGS2aqfbVn", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 58 } ] @@ -51127,54 +54788,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "bxIams_G", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:13.404182+00:00", - "__module__": "datetime" - }, - "trace_id": "snO106yxStaL10ow", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 252 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "bxIams_G", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:13.404224+00:00", - "__module__": "datetime" - }, - "trace_id": "snO106yxStaL10ow", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 20 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "bxIams_G", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:13.404230+00:00", - "__module__": "datetime" - }, - "trace_id": "snO106yxStaL10ow", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 272 } ] @@ -51266,7 +54891,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "\n if n <= 3:\n return True", + "tool_call": "\n if n <=", "type": "tool_call" }, "event_type": { @@ -51291,7 +54916,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "\n if n % 2 == 0 or n % 3", + "tool_call": " ", "type": "tool_call" }, "event_type": { @@ -51316,7 +54941,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " == 0:\n return False\n i = 5\n ", + "tool_call": "3:\n return True\n if n % 2 == 0", "type": "tool_call" }, "event_type": { @@ -51341,7 +54966,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " 
while i * i <= n:\n if n", + "tool_call": " or n % 3 == 0:\n return False\n i", "type": "tool_call" }, "event_type": { @@ -51366,7 +54991,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " % i == 0 or n % (i", + "tool_call": " = 5\n while i * i <= n:\n if n", "type": "tool_call" }, "event_type": { @@ -51391,7 +55016,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " + 2) == 0:\n return False\n i +=", + "tool_call": " % i == 0 or n % (i + 2) ==", "type": "tool_call" }, "event_type": { @@ -51416,7 +55041,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " 6\n return True\n\ndef nth_prime(n):\n count =", + "tool_call": " 0:\n return False\n i += 6\n return", "type": "tool_call" }, "event_type": { @@ -51441,7 +55066,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " 0\n num = 2\n while True:\n if", + "tool_call": " True\n\ndef nth_prime", "type": "tool_call" }, "event_type": { @@ -51466,7 +55091,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": " is_prime(num):\n count += 1\n if count == n", + "tool_call": "(n):\n count = 0\n num", "type": "tool_call" }, "event_type": { @@ -51491,7 +55116,7 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": ":\n return num\n num += 1\n\nprint(nth_prime", + "tool_call": " = 2\n while True:\n if is_prime(num):\n ", "type": "tool_call" }, "event_type": { @@ -51516,7 +55141,57 @@ "__module__": "llama_stack.apis.common.content_types", "value": "in_progress" }, - "tool_call": "(100))", + "tool_call": " count += 1\n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " if count == n:\n return num\n", + "type": "tool_call" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "parse_status": { + "__enum__": "ToolCallParseStatus", + "__module__": "llama_stack.apis.common.content_types", + "value": "in_progress" + }, + "tool_call": " num += 1\n\nprint(nth_prime(100))", "type": "tool_call" }, "event_type": { @@ -51545,7 +55220,7 @@ "arguments": { "code": "def is_prime(n):\n if n <= 1:\n return False\n if n <= 3:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True\n\ndef nth_prime(n):\n count = 0\n num = 2\n while True:\n if is_prime(num):\n count += 1\n if count == n:\n return num\n num += 1\n\nprint(nth_prime(100))" }, - "call_id": "e1110bc1-dc83-480d-ad33-09d49f5ccc8d", + "call_id": "836806e9-2184-4dac-9769-d94a496f9f95", 
"tool_name": { "__enum__": "BuiltinTool", "__module__": "llama_stack.models.llama.datatypes", @@ -51592,54 +55267,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "5J3hM-La", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:09.121100+00:00", - "__module__": "datetime" - }, - "trace_id": "snO106yxStaL10ow", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 40 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "5J3hM-La", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:09.121127+00:00", - "__module__": "datetime" - }, - "trace_id": "snO106yxStaL10ow", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 10 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "5J3hM-La", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:44:09.121132+00:00", - "__module__": "datetime" - }, - "trace_id": "snO106yxStaL10ow", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 50 } ] @@ -51696,7 +55335,27 @@ "data": { "event": { "delta": { - "text": "plexity the company was founded in 2022.", + "text": "plexity the company was founded in 202", + "type": "text" + }, + "event_type": { + "__enum__": "ChatCompletionResponseEventType", + "__module__": "llama_stack.apis.inference.inference", + "value": "progress" + }, + "logprobs": null, + "stop_reason": null + }, + "metrics": null + } + }, + { + "__module__": "llama_stack.apis.inference.inference", + "__pydantic__": "ChatCompletionResponseStreamChunk", + "data": { + "event": { + "delta": { + "text": "2.", "type": "text" }, "event_type": { @@ -51733,54 +55392,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "6jxCq3gU", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:50.430436+00:00", - "__module__": "datetime" - }, - "trace_id": "XhZWljYTTDCYF7vI", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 68 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "6jxCq3gU", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:50.430477+00:00", - "__module__": "datetime" - }, - "trace_id": "XhZWljYTTDCYF7vI", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 22 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "6jxCq3gU", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:50.430489+00:00", - "__module__": "datetime" - }, - "trace_id": "XhZWljYTTDCYF7vI", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 90 } ] @@ -51886,7 +55509,7 @@ "arguments": { "query": "Perplexity the company founding date" }, - "call_id": "199ef050-bc11-4e4b-935d-f5241c3f40ef", + "call_id": "3c2feef6-e21b-4715-83a5-61d24d09d7b5", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -51929,54 +55552,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, 
"metric": "prompt_tokens", - "span_id": "m4wMGuSN", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:49.880525+00:00", - "__module__": "datetime" - }, - "trace_id": "XhZWljYTTDCYF7vI", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 29 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "m4wMGuSN", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:49.880576+00:00", - "__module__": "datetime" - }, - "trace_id": "XhZWljYTTDCYF7vI", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 23 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "m4wMGuSN", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:49.880585+00:00", - "__module__": "datetime" - }, - "trace_id": "XhZWljYTTDCYF7vI", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 52 } ] @@ -52110,54 +55697,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "OyfVMRgR", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:53.322420+00:00", - "__module__": "datetime" - }, - "trace_id": "TMrhR55CR-KrmGp0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 63 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "OyfVMRgR", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:53.322482+00:00", - "__module__": "datetime" - }, - "trace_id": "TMrhR55CR-KrmGp0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 45 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "OyfVMRgR", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:53.322490+00:00", - "__module__": "datetime" - }, - "trace_id": "TMrhR55CR-KrmGp0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 108 } ] @@ -52243,7 +55794,7 @@ "arguments": { "query": "NBA creation date" }, - "call_id": "388e55ab-448a-4a98-905b-196c051bdeea", + "call_id": "2a770dea-edee-4890-b0e3-930a4cb167e3", "tool_name": "knowledge_search" }, "type": "tool_call" @@ -52286,54 +55837,18 @@ }, "metrics": [ { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "prompt_tokens", - "span_id": "QpFMmy3B", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:52.235138+00:00", - "__module__": "datetime" - }, - "trace_id": "TMrhR55CR-KrmGp0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 27 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "completion_tokens", - "span_id": "QpFMmy3B", - "timestamp": { - "__class__": "datetime", - "__datetime__": "2025-03-07T01:45:52.235160+00:00", - "__module__": "datetime" - }, - "trace_id": "TMrhR55CR-KrmGp0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 20 }, { - "attributes": { - "model_id": "meta-llama/Llama-3.3-70B-Instruct", - "provider_id": "fireworks" - }, "metric": "total_tokens", - "span_id": "QpFMmy3B", - "timestamp": { - "__class__": 
"datetime", - "__datetime__": "2025-03-07T01:45:52.235165+00:00", - "__module__": "datetime" - }, - "trace_id": "TMrhR55CR-KrmGp0", - "type": "metric", - "unit": "tokens", + "unit": null, "value": 47 } ] diff --git a/tests/integration/fixtures/recorded_responses/invoke_tool.json b/tests/integration/fixtures/recorded_responses/invoke_tool.json index 8db8ad966..a0cf276c2 100644 --- a/tests/integration/fixtures/recorded_responses/invoke_tool.json +++ b/tests/integration/fixtures/recorded_responses/invoke_tool.json @@ -64,6 +64,19 @@ } } }, + "[[], {\"kwargs\": {\"code\": \"import pandas as pd\\n\\n# Load the CSV file\\ndf = pd.read_csv(\\\"\")\\n\\n# Print the first few rows of the dataframe\\nprint(df.head())\\n\\n# Print information about the dataframe\\nprint(df.info())\\n\\n# Print summary statistics of the dataframe\\nprint(df.describe())\", \"session_id\": \"\"}, \"tool_name\": \"code_interpreter\"}]": { + "type": "value", + "value": { + "__module__": "llama_stack.apis.tools.tools", + "__pydantic__": "ToolInvocationResult", + "data": { + "content": "error\n[stdout]\n[Errno 2] No such file or directory: 'bwrap'\n[/stdout]\n[stderr]\n[Errno 2] No such file or directory: 'bwrap'\n[/stderr]", + "error_code": null, + "error_message": null, + "metadata": null + } + } + }, "[[], {\"kwargs\": {\"code\": \"import pandas as pd\\ndf = pd.read_csv(\\\"\")\\nprint(df.head())\", \"session_id\": \"\"}, \"tool_name\": \"code_interpreter\"}]": { "type": "value", "value": { @@ -400,23 +413,23 @@ "type": "text" }, { - "text": "Result 1:\nDocument_id:bbddb\nContent: conversational data, :func:`~torchtune.datasets.chat_dataset` seems to be a good fit. For any\ncustom local dataset we always need to specify ``source``, ``data_files``, and ``split`` for any dataset\nbuilder in torchtune. For :func:`~torchtune.datasets.chat_dataset`, we additionally need to specify\n``conversation_column`` and ``conversation_style``. Our data follows the ``\"sharegpt\"`` format, so\nwe can specify that here. Altogether, our :func:`~torchtune.datasets.chat_dataset` call should\nlook like so:\n\n.. code-block:: python\n\n from torchtune.datasets import chat_dataset\n from torchtune.models.llama3 import llama3_tokenizer\n\n tokenizer = llama3_tokenizer(\"/tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\")\n ds = chat_dataset(\n tokenizer=tokenizer,\n source=\"json\",\n data_files=\"data/my_data.json\",\n split=\"train\",\n conversation_column=\"dialogue\",\n conversation_style=\"sharegpt\",\n )\n\n.. code-block:: yaml\n\n # In config\n tokenizer:\n _component_: torchtune.models.llama3.llama3_tokenizer\n path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model\n\n dataset:\n _component_: torchtune.datasets.chat_dataset\n source: json\n data_files: data/my_data.json\n split: train\n conversation_column: dialogue\n conversation_style: sharegpt\n\n.. note::\n You can pass in any keyword argument for `load_dataset `_ into all our\n Dataset classes and they will honor them. This is useful for common parameters\n such as specifying the data split with :code:`split` or configuration with\n :code:`name`\n\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\nus and prompt templates are optional. 
Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\nall messages according to their `recommendations `_ into all our\n Dataset classes and they will honor them. This is useful for common parameters\n such as specifying the data split with :code:`split` or configuration with\n :code:`name`\n\nIf you needed to add a prompt template, you would simply pass it into the tokenizer.\nSince we're fine-tuning Llama3, the tokenizer will handle all formatting for\nus and prompt templates are optional. Other models such as Mistral's :class:`~torchtune.models.mistral._tokenizer.MistralTokenizer`,\nuse a chat template by default (:class:`~torchtune.models.mistral.MistralChatTemplate`) to format\nall messages according to their `recommendations `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. 
grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW `.\n.. .. _glossary_fsdp2:\n\n", + "text": "Result 3:\nDocument_id:90a49\nContent: ` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. _glossary_fsdp2:\n\n", "type": "text" }, { - "text": "Result 4:\nDocument_id:15b86\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", + "text": "Result 4:\nDocument_id:14c27\nContent: 06% of all params are trainable.\n\n.. 
note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", "type": "text" }, { - "text": "Result 5:\nDocument_id:83901\nContent: etune\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.use_dora=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n use_dora: True\n\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\neven more memory savings!\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\n model.lora_rank=16 \\\n model.lora_alpha=32 \\\n model.use_dora=True \\\n model.quantize_base=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n apply_lora_to_mlp: True\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\n lora_rank: 16\n lora_alpha: 32\n use_dora: True\n quantize_base: True\n\n\n.. note::\n\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. 
_glossary_fsdp2:\n\n", + "text": "Result 5:\nDocument_id:90a49\nContent: etune\n:func:`torchtune.models.llama3.llama3_8b` with DoRA, you would use :func:`torchtune.models.llama3.lora_llama3_8b` with ``use_dora=True``:\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.use_dora=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n use_dora: True\n\nSince DoRA extends LoRA, the parameters for :ref:`customizing LoRA ` are identical. You can also quantize the base model weights like in :ref:`glossary_qlora` by using ``quantize=True`` to reap\neven more memory savings!\n\n.. code-block:: bash\n\n tune run lora_finetune_single_device --config llama3/8B_lora_single_device \\\n model.apply_lora_to_mlp=True \\\n model.lora_attn_modules=[\"q_proj\",\"k_proj\",\"v_proj\"] \\\n model.lora_rank=16 \\\n model.lora_alpha=32 \\\n model.use_dora=True \\\n model.quantize_base=True\n\n.. code-block:: yaml\n\n model:\n _component_: torchtune.models.lora_llama3_8b\n apply_lora_to_mlp: True\n lora_attn_modules: [\"q_proj\", \"k_proj\", \"v_proj\"]\n lora_rank: 16\n lora_alpha: 32\n use_dora: True\n quantize_base: True\n\n\n.. note::\n\n Under the hood, we've enabled DoRA by adding the :class:`~torchtune.modules.peft.DoRALinear` module, which we swap\n out for :class:`~torchtune.modules.peft.LoRALinear` when ``use_dora=True``.\n\n.. _glossary_distrib:\n\n\n.. TODO\n\n.. Distributed\n.. -----------\n\n.. .. _glossary_fsdp:\n\n.. Fully Sharded Data Parallel (FSDP)\n.. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. All our ``_distributed`` recipes use `FSDP `.\n.. .. _glossary_fsdp2:\n\n", "type": "text" }, { @@ -428,11 +441,11 @@ "error_message": null, "metadata": { "document_ids": [ - "bbddbe62-508d-4c8d-9455-3b60bc2825a5", - "15b8638f-b1b6-4f58-adfa-eb6644c47de3", - "83901b53-33d4-4f5e-8145-b94c783e9f61", - "15b8638f-b1b6-4f58-adfa-eb6644c47de3", - "83901b53-33d4-4f5e-8145-b94c783e9f61" + "3e31d46a-5568-49d4-978e-0ab3b6598a7f", + "14c2766f-d176-468d-9124-f4a46c5166cf", + "90a49271-c246-4bdc-9db9-e3bfa9be5ff1", + "14c2766f-d176-468d-9124-f4a46c5166cf", + "90a49271-c246-4bdc-9db9-e3bfa9be5ff1" ] } } @@ -444,7 +457,7 @@ "__module__": "llama_stack.apis.tools.tools", "__pydantic__": "ToolInvocationResult", "data": { - "content": "{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Meta - Leadership & Governance\", \"url\": \"https://investor.atmeta.com/leadership-and-governance/\", \"content\": \"Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. Mark is responsible for setting the overall direction and product strategy for the company. He leads the design of Meta's services and development of its core technology and infrastructure. Mark studied computer science at Harvard\", \"score\": 0.8342047, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. 
In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\u00a9 2025 Meta\", \"score\": 0.79099923, \"raw_content\": null}, {\"title\": \"The 11 People Running Meta's $1 Trillion Social Media and ... - Observer\", \"url\": \"https://observer.com/2024/01/meta-facebook-top-executives/\", \"content\": \"Meta has one of the most stable leadership team in the tech industry. Almost all of Meta's top executives have been with the company for well over a decade. ... 39, cofounder, chairman and CEO\", \"score\": 0.45536873, \"raw_content\": null}, {\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Meta leadership: images of senior executives for download to use in articles about the company.\", \"score\": 0.21026355, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/Mark_Zuckerberg\", \"content\": \"They began dating in 2003.[175] In September 2010, Chan, who was a medical student at the University of California, San Francisco at the time,[176] moved into his rented house in Palo Alto, California.[177][178] They married on May 19, 2012, in the grounds of his mansion in an event that also celebrated her graduation from medical school.[179][180] Zuckerberg revealed in July 2015 that they were expecting a baby girl and that Chan had previously experienced three miscarriages.[181] Their first daughter was born in December 2015.[182] They announced in a Chinese New Year video that their daughter's Chinese name is Chen Mingyu (Chinese: \\u9648\\u660e\\u5b87).[183] Their second daughter was born in August 2017.[184] Zuckerberg and his wife welcomed their third daughter in March 2023 and announced the news across his social media pages.[185] The couple also have a Puli dog named Beast,[186] who has over two million followers on Facebook.[187] Zuckerberg commissioned the visual artist Daniel Arsham to build a 7-foot-tall sculpture of his wife, which was unveiled in 2024.[188]\", \"score\": 0.05564338, \"raw_content\": null}]}", + "content": "{\"query\": \"current CEO of Meta\", \"top_k\": [{\"title\": \"Executives - Meta\", \"url\": \"https://about.meta.com/media-gallery/executives/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer Joel Kaplan, Chief Global Affairs Officer Susan Li, Chief Financial Officer Javier Olivan, Chief Operating Officer Chris Cox, Chief Product Officer Andrew \\u2018Boz\\u2019 Bosworth, Chief Technology Officer Jennifer Newstead, Chief Legal Officer Dave Wehner, Chief Strategy Officer Will Cathcart, Head of WhatsApp Naomi Gleit, Head of Product John Hegeman, Chief Revenue Officer Adam Mosseri, Head of Instagram Erin Egan, Chief Privacy Officer, Policy Michel Protti, Chief Privacy Officer, Product Alex Schultz, Chief Marketing Officer and VP of Analytics Tom Alison, Head of Facebook Nicola Mendelsohn, Head of Global Business Group Ahmad Al-Dahle, VP and Head of GenAI at Meta Joelle Pineau, Vice President of AI Research and Head of FAIR at Meta\", \"score\": 0.8190992, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer - Meta\", \"url\": 
\"https://about.meta.com/media-gallery/executives/mark-zuckerberg/\", \"content\": \"Mark Zuckerberg, Founder, Chairman and Chief Executive Officer | Meta Meta Quest Ray-Ban Meta Meta Horizon Meta AI Meta Verified Meta Pay Meta Horizon Workrooms Meta and you Learn about our community Shop Meta Meta Quest Meta Portal Meta Horizon Mark Zuckerberg is the founder, chairman and CEO of Meta, which he originally founded as Facebook in 2004. In October 2021, Facebook rebranded to Meta to reflect all of its products and services across its family of apps and a focus on developing social experiences for the metaverse \\u2014 moving beyond 2D screens toward immersive experiences like augmented and virtual reality to help build the next evolution in social technology. Shop Ray-Ban Meta glassesRay-Ban StoriesPrivacy informationSupported countries \\u00a9 2025 Meta\", \"score\": 0.79080546, \"raw_content\": null}, {\"title\": \"Meet the Executive CSuite Team of Meta (Facebook) [2025]\", \"url\": \"https://digitaldefynd.com/IQ/meet-the-executive-csuite-team-of-meta-facebook/\", \"content\": \"Harvard University Executive Programs Free Harvard University Courses As a chief financial officer of Meta, Susan Li oversees the firm\\u2019s finance and facilities team to keep track of the company\\u2019s overall financial health. The chief operating officer of Meta, Javier Olivan, oversees the firm\\u2019s business team, infrastructure, and other products. Andrew Bosworth, called Boz, serves as chief technology officer at Meta and is responsible for leading the firm\\u2019s AR/VR organization, Reality Labs. Andrew has also served as engineering director to oversee events, mobile monetization, and feed ads and as VP of ads and business platforms to lead engineering, design, analytics, and product teams. Meta\\u2019s c-suite team comprises experienced and diverse executives, having extensive experience in technology, finance, legal, and all major industries.\", \"score\": 0.7602419, \"raw_content\": null}, {\"title\": \"Meta - Leadership & Governance\", \"url\": \"https://investor.atmeta.com/leadership-and-governance/\", \"content\": \"Mr. Andreessen was a co-founder of Netscape Communications Corporation, a software company, serving in various positions, including Chief Technology Officer and Executive Vice President of Products. Ms. Killefer also served as Assistant Secretary for Management, Chief Financial Officer, and Chief Operating Officer of the U.S. Department of the Treasury from 1997 to 2000 and as a member of the IRS Oversight Board from 2000 to 2005, including as Chair of the IRS Oversight Board from 2002 to 2004. Ms. 
Travis has served as Executive Vice President and Chief Financial Officer of The Estee Lauder Companies Inc., a global manufacturer and marketer of skin care, makeup, fragrance and hair care products, since August 2012.\", \"score\": 0.6175132, \"raw_content\": null}, {\"title\": \"Mark Zuckerberg - Wikipedia\", \"url\": \"https://en.wikipedia.org/wiki/Mark_Zuckerberg\", \"content\": \"They began dating in 2003.[175] In September 2010, Chan, who was a medical student at the University of California, San Francisco at the time,[176] moved into his rented house in Palo Alto, California.[177][178] They married on May 19, 2012, in the grounds of his mansion in an event that also celebrated her graduation from medical school.[179][180] Zuckerberg revealed in July 2015 that they were expecting a baby girl and that Chan had previously experienced three miscarriages.[181] Their first daughter was born in December 2015.[182] They announced in a Chinese New Year video that their daughter's Chinese name is Chen Mingyu (Chinese: \\u9648\\u660e\\u5b87).[183] Their second daughter was born in August 2017.[184] Zuckerberg and his wife welcomed their third daughter in March 2023 and announced the news across his social media pages.[185] The couple also have a Puli dog named Beast,[186] who has over two million followers on Facebook.[187] Zuckerberg commissioned the visual artist Daniel Arsham to build a 7-foot-tall sculpture of his wife, which was unveiled in 2024.[188]\", \"score\": 0.05570498, \"raw_content\": null}]}", "error_code": null, "error_message": null, "metadata": null @@ -463,23 +476,23 @@ "type": "text" }, { - "text": "Result 1:\nDocument_id:20e5d\nContent: .. _lora_finetune_label:\n\n============================\nFine-Tuning Llama2 with LoRA\n============================\n\nThis guide will teach you about `LoRA `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. 
When using an optimizer with momentum,\nlike `AdamW `_, a parameter-efficient finetuning technique,\nand show you how you can use torchtune to finetune a Llama2 model with LoRA.\nIf you already know what LoRA is and want to get straight to running\nyour own LoRA finetune in torchtune, you can jump to :ref:`LoRA finetuning recipe in torchtune`.\n\n.. grid:: 2\n\n .. grid-item-card:: :octicon:`mortar-board;1em;` What you will learn\n\n * What LoRA is and how it saves memory during finetuning\n * An overview of LoRA components in torchtune\n * How to run a LoRA finetune using torchtune\n * How to experiment with different LoRA configurations\n\n .. grid-item-card:: :octicon:`list-unordered;1em;` Prerequisites\n\n * Be familiar with :ref:`torchtune`\n * Make sure to :ref:`install torchtune`\n * Make sure you have downloaded the :ref:`Llama2-7B model weights`\n\nWhat is LoRA?\n-------------\n\n`LoRA `_ is an adapter-based method for\nparameter-efficient finetuning that adds trainable low-rank decomposition matrices to different layers of a neural network,\nthen freezes the network's remaining parameters. LoRA is most commonly applied to\ntransformer models, in which case it is common to add the low-rank matrices\nto some of the linear projections in each transformer layer's self-attention.\n\n.. note::\n\n If you're unfamiliar, check out these references for the `definition of rank `_\n and discussion of `low-rank approximations `_.\n\nBy finetuning with LoRA (as opposed to finetuning all model parameters),\nyou can expect to see memory savings due to a substantial reduction in the\nnumber of parameters with gradients. When using an optimizer with momentum,\nlike `AdamW ` alone will not handle the definition of which parameters are trainable.\n See :ref:`below` for how to do this.\n\nLet's inspect each of these models a bit more closely.\n\n.. code-block:: bash\n\n # Print the first layer's self-attention in the usual Llama2 model\n >>> print(base_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (pos_embeddings): RotaryPositionalEmbeddings()\n )\n\n # Print the same for Llama2 with LoRA weights\n >>> print(lora_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): LoRALinear(\n (dropout): Dropout(p=0.0, inplace=False)\n \n", + "text": "Result 2:\nDocument_id:14c27\nContent: LoRA to Llama2 models\n------------------------------\n\nWith torchtune, we can easily apply LoRA to Llama2 with a variety of different configurations.\nLet's take a look at how to construct Llama2 models in torchtune with and without LoRA.\n\n.. code-block:: python\n\n from torchtune.models.llama2 import llama2_7b, lora_llama2_7b\n\n # Build Llama2 without any LoRA layers\n base_model = llama2_7b()\n\n # The default settings for lora_llama2_7b will match those for llama2_7b\n # We just need to define which layers we want LoRA applied to.\n # Within each self-attention, we can choose from [\"q_proj\", \"k_proj\", \"v_proj\", and \"output_proj\"].\n # We can also set apply_lora_to_mlp=True or apply_lora_to_output=True to apply LoRA to other linear\n # layers outside of the self-attention.\n lora_model = lora_llama2_7b(lora_attn_modules=[\"q_proj\", \"v_proj\"])\n\n.. 
note::\n\n Calling :func:`lora_llama_2_7b ` alone will not handle the definition of which parameters are trainable.\n See :ref:`below` for how to do this.\n\nLet's inspect each of these models a bit more closely.\n\n.. code-block:: bash\n\n # Print the first layer's self-attention in the usual Llama2 model\n >>> print(base_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (k_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (v_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (output_proj): Linear(in_features=4096, out_features=4096, bias=False)\n (pos_embeddings): RotaryPositionalEmbeddings()\n )\n\n # Print the same for Llama2 with LoRA weights\n >>> print(lora_model.layers[0].attn)\n MultiHeadAttention(\n (q_proj): LoRALinear(\n (dropout): Dropout(p=0.0, inplace=False)\n \n", "type": "text" }, { - "text": "Result 3:\nDocument_id:20e5d\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", + "text": "Result 3:\nDocument_id:14c27\nContent: 06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `_.\nMake sure that you have first downloaded the Llama2 weights and tokenizer by following :ref:`these instructions`.\nYou can then run the following command to perform a LoRA finetune of Llama2-7B with two GPUs (each having VRAM of at least 16GB):\n\n.. 
code-block:: bash\n\n tune run --nnodes 1 --nproc_per_node 2 lora_finetune_distributed --config llama2/7B_lora\n\n.. note::\n Make sure to point to the location of your Llama2 weights and tokenizer. This can be done\n either by adding :code:`checkpointer.checkpoint_files=[my_model_checkpoint_path] tokenizer_checkpoint=my_tokenizer_checkpoint_path`\n or by directly modifying the :code:`7B_lora.yaml` file. See our \"\":ref:`config_tutorial_label`\" recipe\n for more details on how you can easily clone and modify torchtune configs.\n\n.. note::\n You can modify the value of :code:`nproc_per_node` depending on (a) the number of GPUs you have available,\n and (b) the memory constraints of your hardware.\n\nThe preceding command will run a LoRA finetune with torchtune's factory settings, but we may want to experiment a bit.\nLet's take a closer look at some of the :code:`lora_finetune_distributed` config.\n\n.. code-block:: yaml\n\n # Model Arguments\n model:\n _component_: lora_llama2_7b\n lora_attn_modules: ['q_proj', 'v_proj']\n lora_rank: 8\n lora_alpha: 16\n ...\n\nWe see that the\n", "type": "text" }, { - "text": "Result 4:\nDocument_id:20e5d\nContent: from our Llama2\nmodel without any wrappers or custom checkpoint conversion logic.\n\n.. code-block:: python\n\n # Assuming that base_model already has the pretrained Llama2 weights,\n # this will directly load them into your LoRA model without any conversion necessary.\n lora_model.load_state_dict(base_model.state_dict(), strict=False)\n\n.. note::\n Whenever loading weights with :code:`strict=False`, you should verify that any missing or extra keys in\n the loaded :code:`state_dict` are as expected. torchtune's LoRA recipes do this by default via\n :func:`validate_missing_and_unexpected_for_lora() `.\n\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\n\n.. _setting_trainable_params:\n\n.. code-block:: python\n\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\n\n # Fetch all params from the model that are associated with LoRA.\n lora_params = get_adapter_params(lora_model)\n\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\n set_trainable_params(lora_model, lora_params)\n\n # Print the total number of parameters\n total_params = sum([p.numel() for p in lora_model.parameters()])\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\n print(\n f\"\"\"\n {total_params} total params,\n {trainable_params}\" trainable params,\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\n \"\"\"\n )\n\n 6742609920 total params,\n 4194304 trainable params,\n 0.06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe `.\n\nOnce we've loaded the base model weights, we also want to set only LoRA parameters to trainable.\n\n.. _setting_trainable_params:\n\n.. 
code-block:: python\n\n from torchtune.modules.peft.peft_utils import get_adapter_params, set_trainable_params\n\n # Fetch all params from the model that are associated with LoRA.\n lora_params = get_adapter_params(lora_model)\n\n # Set requires_grad=True on lora_params, and requires_grad=False on all others.\n set_trainable_params(lora_model, lora_params)\n\n # Print the total number of parameters\n total_params = sum([p.numel() for p in lora_model.parameters()])\n trainable_params = sum([p.numel() for p in lora_model.parameters() if p.requires_grad])\n print(\n f\"\"\"\n {total_params} total params,\n {trainable_params}\" trainable params,\n {(100.0 * trainable_params / total_params):.2f}% of all params are trainable.\n \"\"\"\n )\n\n 6742609920 total params,\n 4194304 trainable params,\n 0.06% of all params are trainable.\n\n.. note::\n If you are directly using the LoRA recipe (as detailed :ref:`here`), you need only pass the\n relevant checkpoint path. Loading model weights and setting trainable parameters will be taken care\n of in the recipe.\n\n\n.. _lora_recipe_label:\n\nLoRA finetuning recipe in torchtune\n-----------------------------------\n\nFinally, we can put it all together and finetune a model using torchtune's `LoRA recipe