Update 02_Prompt_Engineering101.ipynb

Mirror of https://github.com/meta-llama/llama-stack.git

parent be8bb484db
commit bd06a8b04b

1 changed file with 13 additions and 10 deletions
@@ -47,7 +47,8 @@
    "outputs": [],
    "source": [
     "HOST = \"localhost\" # Replace with your host\n",
-    "PORT = 5000 # Replace with your port"
+    "PORT = 5001 # Replace with your port\n",
+    "MODEL_NAME='meta-llama/Llama-3.2-3B-Instruct'"
    ]
   },
   {
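Taken together, the hunk above bumps the default port to 5001 and adds a MODEL_NAME constant for reuse in later cells. A minimal sketch of the resulting setup cell, assuming the notebook builds its client from HOST and PORT via llama-stack-client (the import and base_url wiring are not shown in this diff):

    # Sketch of the updated setup cell; client construction is an assumption.
    from llama_stack_client import LlamaStackClient

    HOST = "localhost"  # Replace with your host
    PORT = 5001         # Replace with your port
    MODEL_NAME = 'meta-llama/Llama-3.2-3B-Instruct'

    # Assumed wiring: point the client at the local Llama Stack server.
    client = LlamaStackClient(base_url=f"http://{HOST}:{PORT}")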
@@ -146,13 +147,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 8,
    "id": "8b321089",
    "metadata": {},
    "outputs": [],
    "source": [
     "response = client.inference.chat_completion(\n",
-    "    messages=few_shot_examples, model='Llama3.1-8B-Instruct'\n",
+    "    messages=few_shot_examples, model_id=MODEL_NAME\n",
     ")"
    ]
   },
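This hunk swaps the hard-coded model string for the new MODEL_NAME constant and renames the parameter from model to model_id. A sketch of the updated few-shot call under those assumptions; few_shot_examples is defined earlier in the notebook, so the messages below are hypothetical stand-ins:

    # Hypothetical few-shot turns; the notebook's real list lives outside this hunk.
    few_shot_examples = [
        {"role": "user", "content": "Have shorter, thicker necks and smaller ears."},
        # Some client versions expect assistant turns to carry a stop_reason.
        {"role": "assistant", "content": "That's Alpaca!", "stop_reason": "end_of_turn"},
        {"role": "user", "content": "Generally taller and more robust, commonly seen as guard animals."},
    ]

    # The change under review: select the model via model_id, not model.
    response = client.inference.chat_completion(
        messages=few_shot_examples, model_id=MODEL_NAME
    )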
@ -168,7 +169,7 @@
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 5,
|
"execution_count": 9,
|
||||||
"id": "4ac1ac3e",
|
"id": "4ac1ac3e",
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
|
@@ -176,7 +177,7 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "\u001b[36m> Response: That's Llama!\u001b[0m\n"
+     "\u001b[36m> Response: That sounds like a Donkey or an Ass (also known as a Burro)!\u001b[0m\n"
     ]
    }
   ],
@@ -197,7 +198,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": 15,
    "id": "524189bd",
    "metadata": {},
    "outputs": [
@@ -205,7 +206,9 @@
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "\u001b[36m> Response: That's Llama!\u001b[0m\n"
+     "\u001b[36m> Response: You're thinking of a Llama again!\n",
+     "\n",
+     "Is that correct?\u001b[0m\n"
     ]
    }
   ],
@@ -250,7 +253,7 @@
     "      \"content\": 'Generally taller and more robust, commonly seen as guard animals.'\n",
     "    }\n",
     "],\n",
-    "    model='Llama3.2-11B-Vision-Instruct',\n",
+    "    model_id=MODEL_NAME,\n",
     ")\n",
     "\n",
     "cprint(f'> Response: {response.completion_message.content}', 'cyan')"
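The same model-to-model_id migration lands in the final example cell, whose printing line is unchanged by this commit. For reference, a self-contained sketch of that pattern, with the cell's long message list abbreviated to one hypothetical user turn (termcolor's cprint is the only extra dependency):

    from termcolor import cprint  # colored console output, as used by the notebook

    # One hypothetical user turn standing in for the cell's full message list.
    response = client.inference.chat_completion(
        messages=[{
            "role": "user",
            "content": "Generally taller and more robust, commonly seen as guard animals.",
        }],
        model_id=MODEL_NAME,
    )

    # Unchanged by this commit: the reply text is on completion_message.content.
    cprint(f"> Response: {response.completion_message.content}", "cyan")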
@@ -269,7 +272,7 @@
   ],
   "metadata": {
    "kernelspec": {
-    "display_name": "Python 3 (ipykernel)",
+    "display_name": "base",
     "language": "python",
     "name": "python3"
    },
@@ -283,7 +286,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.15"
+   "version": "3.12.2"
   }
  },
  "nbformat": 4,