Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-15 22:47:59 +00:00

Commit e746f741d1 (parent 3c707e0a05)
Doc enhancement for the inference, prompt engineering, and local-cloud guides

3 changed files with 308 additions and 124 deletions
@@ -42,7 +42,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
    "id": "38a39e44",
    "metadata": {},
    "outputs": [],
@@ -52,11 +52,9 @@
    ]
   },
   {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "d1d097ab",
+   "cell_type": "markdown",
+   "id": "7dacaa2d-94e9-42e9-82a0-73522dfc7010",
    "metadata": {},
-   "outputs": [],
    "source": [
     "### 1. Set Up the Client\n",
     "\n",
@@ -65,7 +63,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "id": "7a573752",
    "metadata": {},
    "outputs": [],
@@ -73,7 +71,7 @@
     "from llama_stack_client import LlamaStackClient\n",
     "from llama_stack_client.types import SystemMessage, UserMessage\n",
     "\n",
-    "client = LlamaStackClient(base_url='http://{HOST}:{PORT}')"
+    "client = LlamaStackClient(base_url=f'http://{HOST}:{PORT}')"
    ]
   },
   {
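Reviewer note on the hunk above: without the `f` prefix, Python keeps the braces literally, so the client would try to reach the nonsense URL `http://{HOST}:{PORT}`. A minimal sketch of the difference; the HOST and PORT values here are placeholders, not from the notebook:

    # Placeholder values; the notebook expects the HOST/PORT of a running Llama Stack server.
    HOST = "localhost"
    PORT = 5000

    plain = 'http://{HOST}:{PORT}'          # braces kept literally -> broken URL
    interpolated = f'http://{HOST}:{PORT}'  # -> 'http://localhost:5000'
    print(plain)
    print(interpolated)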
@@ -88,10 +86,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "id": "77c29dba",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "A gentle llama roams the land,\n",
+      "With soft fur and a gentle hand.\n"
+     ]
+    }
+   ],
    "source": [
     "response = client.inference.chat_completion(\n",
     "    messages=[\n",
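The hunk cuts this cell off at `messages=[`. For context, a sketch of what the full single-turn call looks like, assembled from the surrounding cells; the system prompt and the model id are assumptions, not part of the diff:

    from llama_stack_client import LlamaStackClient
    from llama_stack_client.types import SystemMessage, UserMessage

    HOST = "localhost"  # placeholder
    PORT = 5000         # placeholder
    client = LlamaStackClient(base_url=f'http://{HOST}:{PORT}')

    # Single-turn completion: one system message, one user message.
    response = client.inference.chat_completion(
        messages=[
            SystemMessage(content='You are a friendly assistant.', role='system'),  # assumed prompt
            UserMessage(content='Write me a 2 sentence poem about llama', role='user'),
        ],
        model='Llama3.1-8B-Instruct',  # assumption: use whichever model your server serves
    )
    print(response.completion_message.content)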
@@ -123,10 +130,19 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "id": "5c6812da",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "O, fairest llama, with thy softest fleece,\n",
+      "Thy gentle eyes, like sapphires, in serenity do cease.\n"
+     ]
+    }
+   ],
    "source": [
     "response = client.inference.chat_completion(\n",
     "    messages=[\n",
@@ -151,17 +167,48 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "id": "02211625",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdin",
+     "output_type": "stream",
+     "text": [
+      "User> Write me a 3 sentence poem about alpaca\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[36m> Response: Softly grazing, gentle soul,\n",
+      "Alpaca's fleece, a treasure whole,\n",
+      "In Andean fields, they softly roll.\u001b[0m\n"
+     ]
+    },
+    {
+     "name": "stdin",
+     "output_type": "stream",
+     "text": [
+      "User> exit\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[33mEnding conversation. Goodbye!\u001b[0m\n"
+     ]
+    }
+   ],
    "source": [
     "import asyncio\n",
     "from llama_stack_client import LlamaStackClient\n",
     "from llama_stack_client.types import UserMessage\n",
     "from termcolor import cprint\n",
     "\n",
-    "client = LlamaStackClient(base_url='http://{HOST}:{PORT}')\n",
+    "client = LlamaStackClient(base_url=f'http://{HOST}:{PORT}')\n",
     "\n",
     "async def chat_loop():\n",
     "    while True:\n",
@@ -177,7 +224,10 @@
     "        )\n",
     "        cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
     "\n",
-    "asyncio.run(chat_loop())"
+    "# Run the chat loop in a Jupyter Notebook cell using `await`\n",
+    "await chat_loop()\n",
+    "# To run it in a python file, use this line instead\n",
+    "# asyncio.run(chat_loop())"
    ]
   },
   {
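The switch from `asyncio.run(chat_loop())` to a bare `await chat_loop()` is the substance of this hunk: Jupyter already runs an event loop, and calling `asyncio.run()` from a cell raises "RuntimeError: asyncio.run() cannot be called from a running event loop". A minimal, self-contained illustration:

    import asyncio

    async def demo():
        await asyncio.sleep(0)
        print('ran on the event loop')

    # In a notebook cell, IPython's loop is already running, so write:
    #     await demo()
    # In a plain .py script there is no running loop, so use the entry point:
    if __name__ == '__main__':
        asyncio.run(demo())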
@@ -192,10 +242,69 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "id": "9496f75c",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdin",
+     "output_type": "stream",
+     "text": [
+      "User> what is 1+1\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[36m> Response: 1 + 1 = 2\u001b[0m\n"
+     ]
+    },
+    {
+     "name": "stdin",
+     "output_type": "stream",
+     "text": [
+      "User> what is llama + alpaca\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[36m> Response: That's a creative and imaginative question. However, since llamas and alpacas are animals, not numbers, we can't perform a mathematical operation on them.\n",
+      "\n",
+      "But if we were to interpret this as a creative or humorous question, we could say that the result of \"llama + alpaca\" is a fun and fuzzy bundle of South American camelid cuteness!\u001b[0m\n"
+     ]
+    },
+    {
+     "name": "stdin",
+     "output_type": "stream",
+     "text": [
+      "User> what was the first question\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[36m> Response: The first question was \"what is 1+1\"\u001b[0m\n"
+     ]
+    },
+    {
+     "name": "stdin",
+     "output_type": "stream",
+     "text": [
+      "User> exit\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[33mEnding conversation. Goodbye!\u001b[0m\n"
+     ]
+    }
+   ],
    "source": [
     "async def chat_loop():\n",
     "    conversation_history = []\n",
@@ -217,7 +326,10 @@
     "        assistant_message = UserMessage(content=response.completion_message.content, role='user')\n",
     "        conversation_history.append(assistant_message)\n",
     "\n",
-    "asyncio.run(chat_loop())"
+    "# Use `await` in the Jupyter Notebook cell to call the function\n",
+    "await chat_loop()\n",
+    "# To run it in a python file, use this line instead\n",
+    "# asyncio.run(chat_loop())"
    ]
   },
   {
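Context for this hunk: the cell keeps multi-turn memory by appending both sides of the exchange to `conversation_history` and resending the whole list on each call, which is why the model can later answer "what was the first question". Note that the context line stores the assistant reply as a `UserMessage` with `role='user'`; that is the notebook's simplification. A role-faithful variant would look roughly like this sketch, where the plain-dict messages are an assumption for illustration:

    # Hypothetical minimal history buffer; real code would use the client's message types.
    conversation_history = []

    def record_turn(user_text: str, assistant_text: str) -> None:
        # Resending this list on every call is what gives the model its memory.
        conversation_history.append({'role': 'user', 'content': user_text})
        conversation_history.append({'role': 'assistant', 'content': assistant_text})

    record_turn('what is 1+1', '1 + 1 = 2')
    print(conversation_history)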
@@ -234,10 +346,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "id": "d119026e",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[32mUser> Write me a 3 sentence poem about llama\u001b[0m\n",
+      "\u001b[36mAssistant> \u001b[0m\u001b[33mSoft\u001b[0m\u001b[33mly\u001b[0m\u001b[33m padded\u001b[0m\u001b[33m feet\u001b[0m\u001b[33m on\u001b[0m\u001b[33m the\u001b[0m\u001b[33m ground\u001b[0m\u001b[33m,\n",
+      "\u001b[0m\u001b[33mA\u001b[0m\u001b[33m gentle\u001b[0m\u001b[33m llama\u001b[0m\u001b[33m's\u001b[0m\u001b[33m peaceful\u001b[0m\u001b[33m sound\u001b[0m\u001b[33m,\n",
+      "\u001b[0m\u001b[33mF\u001b[0m\u001b[33murry\u001b[0m\u001b[33m coat\u001b[0m\u001b[33m and\u001b[0m\u001b[33m calm\u001b[0m\u001b[33m,\u001b[0m\u001b[33m serene\u001b[0m\u001b[33m eyes\u001b[0m\u001b[33m all\u001b[0m\u001b[33m around\u001b[0m\u001b[33m.\u001b[0m\u001b[97m\u001b[0m\n"
+     ]
+    }
+   ],
    "source": [
     "import asyncio\n",
     "from llama_stack_client import LlamaStackClient\n",
@@ -246,12 +369,12 @@
     "from termcolor import cprint\n",
     "\n",
     "async def run_main(stream: bool = True):\n",
-    "    client = LlamaStackClient(base_url='http://{HOST}:{PORT}')\n",
+    "    client = LlamaStackClient(base_url=f'http://{HOST}:{PORT}')\n",
     "\n",
     "    message = UserMessage(\n",
-    "        content='hello world, write me a 2 sentence poem about the moon', role='user'\n",
+    "        content='Write me a 3 sentence poem about llama', role='user'\n",
     "    )\n",
-    "    print(f'User>{message.content}', 'green')\n",
+    "    cprint(f'User> {message.content}', 'green')\n",
     "\n",
     "    response = client.inference.chat_completion(\n",
     "        messages=[message],\n",
@@ -260,22 +383,37 @@
     "    )\n",
     "\n",
     "    if not stream:\n",
-    "        cprint(f'> Response: {response}', 'cyan')\n",
+    "        cprint(f'> Response: {response.completion_message.content}', 'cyan')\n",
     "    else:\n",
     "        async for log in EventLogger().log(response):\n",
     "            log.print()\n",
-    "\n",
-    "    \n",
+    "    models_response = client.models.list()\n",
     "    print(models_response)\n",
     "\n",
-    "if __name__ == '__main__':\n",
-    "    asyncio.run(run_main())"
+    "# In a Jupyter Notebook cell, use `await` to call the function\n",
+    "await run_main()\n",
+    "# To run it in a python file, use this line instead\n",
+    "# asyncio.run(chat_loop())"
    ]
   }
  ],
  "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
   "language_info": {
-   "name": "python"
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.15"
   }
  },
  "nbformat": 4,
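Taken together, the last three hunks turn the final cell into a working streaming example. Note that the commented fallback added here reads `# asyncio.run(chat_loop())` where `run_main` is evidently intended, a copy-paste slip carried over from the earlier cells. The diff never shows the `EventLogger` import or the `stream` flag being passed, so the following reconstruction is a sketch: the import path matches the llama-stack examples of this period (verify against your installed client), and the model id is a placeholder.

    import asyncio
    from llama_stack_client import LlamaStackClient
    from llama_stack_client.lib.inference.event_logger import EventLogger  # assumed import path
    from llama_stack_client.types import UserMessage
    from termcolor import cprint

    HOST = "localhost"  # placeholder
    PORT = 5000         # placeholder

    async def run_main(stream: bool = True):
        client = LlamaStackClient(base_url=f'http://{HOST}:{PORT}')

        message = UserMessage(content='Write me a 3 sentence poem about llama', role='user')
        cprint(f'User> {message.content}', 'green')

        response = client.inference.chat_completion(
            messages=[message],
            model='Llama3.1-8B-Instruct',  # placeholder model id
            stream=stream,
        )

        if not stream:
            cprint(f'> Response: {response.completion_message.content}', 'cyan')
        else:
            # Each streamed event carries a token delta; EventLogger colorizes and prints them.
            async for log in EventLogger().log(response):
                log.print()

        models_response = client.models.list()
        print(models_response)

    # In a notebook cell: await run_main(); in a script: asyncio.run(run_main())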