Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-31 16:01:46 +00:00)

commit 92f16ed27b (parent efb0fbfaea)

    changed from vision to text model

4 changed files with 12 additions and 12 deletions
@@ -7,7 +7,7 @@
 "source": [
 "# Llama Stack Inference Guide\n",
 "\n",
-"This document provides instructions on how to use Llama Stack's `chat_completion` function for generating text using the `Llama3.2-11B-Vision-Instruct` model. \n",
+"This document provides instructions on how to use Llama Stack's `chat_completion` function for generating text using the `Llama3.1-8B-Instruct` model. \n",
 "\n",
 "Before you begin, please ensure Llama Stack is installed and set up by following the [Getting Started Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html).\n",
 "\n",
@@ -105,7 +105,7 @@
 " SystemMessage(content='You are a friendly assistant.', role='system'),\n",
 " UserMessage(content='Write a two-sentence poem about llama.', role='user')\n",
 " ],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 ")\n",
 "\n",
 "print(response.completion_message.content)"
@@ -149,7 +149,7 @@
 " SystemMessage(content='You are shakespeare.', role='system'),\n",
 " UserMessage(content='Write a two-sentence poem about llama.', role='user')\n",
 " ],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 ")\n",
 "\n",
 "print(response.completion_message.content)"
@@ -220,7 +220,7 @@
 " message = UserMessage(content=user_input, role='user')\n",
 " response = client.inference.chat_completion(\n",
 " messages=[message],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " )\n",
 " cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
 "\n",
@@ -319,7 +319,7 @@
 "\n",
 " response = client.inference.chat_completion(\n",
 " messages=conversation_history,\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " )\n",
 " cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
 "\n",
@@ -378,7 +378,7 @@
 "\n",
 " response = client.inference.chat_completion(\n",
 " messages=[message],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " stream=stream,\n",
 " )\n",
 "\n",

@@ -143,7 +143,7 @@
 "\n",
 " response = client.inference.chat_completion(\n",
 " messages=[message],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " stream=stream,\n",
 " )\n",
 "\n",

@@ -154,7 +154,7 @@
 "outputs": [],
 "source": [
 "response = client.inference.chat_completion(\n",
-" messages=few_shot_examples, model='Llama3.2-11B-Vision-Instruct'\n",
+" messages=few_shot_examples, model='Llama3.1-8B-Instruct'\n",
 ")"
 ]
 },

@@ -1,6 +1,6 @@
 # Llama Stack Quickstart Guide
 
-This guide will walk you through setting up an end-to-end workflow with Llama Stack, enabling you to perform text generation using the `Llama3.2-11B-Vision-Instruct` model. Follow these steps to get started quickly.
+This guide will walk you through setting up an end-to-end workflow with Llama Stack, enabling you to perform text generation using the `Llama3.1-8B-Instruct` model. Follow these steps to get started quickly.
 
 If you're looking for more specific topics like tool calling or agent setup, we have a [Zero to Hero Guide](#next-steps) that covers everything from Tool Calling to Agents in detail. Feel free to skip to the end to explore the advanced topics you're interested in.
 
@@ -42,7 +42,7 @@ pip install llama-stack
 Download the necessary Llama model checkpoints using the `llama` CLI:
 
 ```bash
-llama download --model-id Llama3.2-11B-Vision-Instruct
+llama download --model-id Llama3.1-8B-Instruct
 ```
 
 *Follow the CLI prompts to complete the download. You may need to accept a license agreement. Obtain an instant license [here](https://www.llama.com/llama-downloads/).*
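For reference, the changed notebook cells assemble into a `chat_completion` call like the following, shown here as it reads after this commit. This is a minimal runnable sketch: the messages, model ID, and `completion_message` access come straight from the diff, while the import paths, `LlamaStackClient` construction, and `base_url` port are assumptions about the `llama-stack-client` package of this era and may need adjusting.

```python
# Minimal sketch of the post-commit call. Only the chat_completion body
# appears in the diff above; the imports and client setup are assumptions.
from llama_stack_client import LlamaStackClient
from llama_stack_client.types import SystemMessage, UserMessage

# Hypothetical local server address; point this at wherever the Llama Stack
# distribution is actually running.
client = LlamaStackClient(base_url="http://localhost:5000")

response = client.inference.chat_completion(
    messages=[
        SystemMessage(content="You are a friendly assistant.", role="system"),
        UserMessage(content="Write a two-sentence poem about llama.", role="user"),
    ],
    # The model ID this commit switches to, replacing Llama3.2-11B-Vision-Instruct.
    model="Llama3.1-8B-Instruct",
)

print(response.completion_message.content)
```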