Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-16 06:53:47 +00:00)
Commit b556cd91fd (parent d0baf24999)

    standardized port and also included pre-req for all notebooks

8 changed files with 177 additions and 42 deletions
@@ -7,6 +7,8 @@
   "source": [
    "## Getting Started with LlamaStack Vision API\n",
+   "\n",
+   "Before you begin, please ensure Llama Stack is installed and set up by following the [Getting Started Guide](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html).\n",
    "\n",
    "Let's import the necessary packages"
   ]
  },
@@ -37,7 +39,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 2,
+  "execution_count": null,
   "id": "1d293479-9dde-4b68-94ab-d0c4c61ab08c",
   "metadata": {},
   "outputs": [],
@@ -65,33 +67,33 @@
    "def encode_image_to_data_url(file_path: str) -> str:\n",
    "    \"\"\"\n",
    "    Encode an image file to a data URL.\n",
-   "    \n",
+   "\n",
    "    Args:\n",
    "        file_path (str): Path to the image file\n",
-   "    \n",
+   "\n",
    "    Returns:\n",
    "        str: Data URL string\n",
    "    \"\"\"\n",
    "    mime_type, _ = mimetypes.guess_type(file_path)\n",
    "    if mime_type is None:\n",
    "        raise ValueError(\"Could not determine MIME type of the file\")\n",
-   "    \n",
+   "\n",
    "    with open(file_path, \"rb\") as image_file:\n",
    "        encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n",
-   "    \n",
+   "\n",
    "    return f\"data:{mime_type};base64,{encoded_string}\"\n",
    "\n",
    "async def process_image(client: LlamaStackClient, image_path: str, stream: bool = True):\n",
    "    \"\"\"\n",
    "    Process an image through the LlamaStack Vision API.\n",
-   "    \n",
+   "\n",
    "    Args:\n",
    "        client (LlamaStackClient): Initialized client\n",
    "        image_path (str): Path to image file\n",
    "        stream (bool): Whether to stream the response\n",
    "    \"\"\"\n",
    "    data_url = encode_image_to_data_url(image_path)\n",
-   "    \n",
+   "\n",
    "    message = UserMessage(\n",
    "        role=\"user\",\n",
    "        content=[\n",
@@ -99,14 +101,14 @@
    "            \"Describe what is in this image.\",\n",
    "        ],\n",
    "    )\n",
-   "    \n",
+   "\n",
    "    cprint(f\"User> Sending image for analysis...\", \"green\")\n",
    "    response = client.inference.chat_completion(\n",
    "        messages=[message],\n",
    "        model=\"Llama3.2-11B-Vision-Instruct\",\n",
    "        stream=stream,\n",
    "    )\n",
-   "    \n",
+   "\n",
    "    if not stream:\n",
    "        cprint(f\"> Response: {response}\", \"cyan\")\n",
    "    else:\n",
@@ -146,10 +148,10 @@
    "    client = LlamaStackClient(\n",
    "        base_url=f\"http://{HOST}:{PORT}\",\n",
    "    )\n",
-   "    \n",
+   "\n",
    "    # Process image\n",
    "    await process_image(client, \"logo.png\")\n",
-   "    \n",
+   "\n",
    "    # Query available models\n",
    "    models_response = client.models.list()\n",
    "    print(\"\\nAvailable Models:\")\n",