Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-15 22:47:59 +00:00)
changed from vision to text model
commit 92f16ed27b (parent efb0fbfaea)
4 changed files with 12 additions and 12 deletions
@@ -7,7 +7,7 @@
 "source": [
 "# Llama Stack Inference Guide\n",
 "\n",
-"This document provides instructions on how to use Llama Stack's `chat_completion` function for generating text using the `Llama3.2-11B-Vision-Instruct` model. \n",
+"This document provides instructions on how to use Llama Stack's `chat_completion` function for generating text using the `Llama3.1-8B-Instruct` model. \n",
 "\n",
 "Before you begin, please ensure Llama Stack is installed and set up by following the [Getting Started Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html).\n",
 "\n",
@@ -105,7 +105,7 @@
 " SystemMessage(content='You are a friendly assistant.', role='system'),\n",
 " UserMessage(content='Write a two-sentence poem about llama.', role='user')\n",
 " ],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 ")\n",
 "\n",
 "print(response.completion_message.content)"
@@ -149,7 +149,7 @@
 " SystemMessage(content='You are shakespeare.', role='system'),\n",
 " UserMessage(content='Write a two-sentence poem about llama.', role='user')\n",
 " ],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 ")\n",
 "\n",
 "print(response.completion_message.content)"
@@ -220,7 +220,7 @@
 " message = UserMessage(content=user_input, role='user')\n",
 " response = client.inference.chat_completion(\n",
 " messages=[message],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " )\n",
 " cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
 "\n",
@@ -319,7 +319,7 @@
 "\n",
 " response = client.inference.chat_completion(\n",
 " messages=conversation_history,\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " )\n",
 " cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
 "\n",
@@ -378,7 +378,7 @@
 "\n",
 " response = client.inference.chat_completion(\n",
 " messages=[message],\n",
-" model='Llama3.2-11B-Vision-Instruct',\n",
+" model='Llama3.1-8B-Instruct',\n",
 " stream=stream,\n",
 " )\n",
 "\n",
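For reference, every hunk makes the same one-line change: the notebook's `chat_completion` examples now target the text-only `Llama3.1-8B-Instruct` model instead of `Llama3.2-11B-Vision-Instruct`. Below is a minimal sketch of what one such call looks like after the change. It assumes the `llama_stack_client` Python package with `SystemMessage`/`UserMessage` imported from `llama_stack_client.types` (the diff does not show the notebook's import cell), and a placeholder host/port for a running Llama Stack server.

    # Sketch only: base_url is a placeholder; the import paths are assumed
    # to match the ones used elsewhere in this notebook.
    from llama_stack_client import LlamaStackClient
    from llama_stack_client.types import SystemMessage, UserMessage

    # Point the client at a running Llama Stack server.
    client = LlamaStackClient(base_url='http://localhost:5000')

    # Same call shape as in the diff context, with the new text model.
    response = client.inference.chat_completion(
        messages=[
            SystemMessage(content='You are a friendly assistant.', role='system'),
            UserMessage(content='Write a two-sentence poem about llama.', role='user'),
        ],
        model='Llama3.1-8B-Instruct',
    )
    print(response.completion_message.content)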