{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {
    "id": "dwGtLi_tvM6N"
   },
   "source": [
    "# Using LiteLLM with Petals"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "bdlgaWQqDpzj"
   },
   "outputs": [],
   "source": [
    "!pip install \"litellm>=0.1.715\"  # Petals support requires 0.1.715 and upwards"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "5Id2QKwOEH8X"
   },
   "outputs": [],
   "source": [
    "# install petals\n",
    "!pip install git+https://github.com/bigscience-workshop/petals"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {
    "id": "k42fldw3veSN"
   },
   "source": [
    "## petals-team/StableBeluga2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "tIHcEHdSDqju",
    "outputId": "485dbf54-395c-433a-bbf4-8eb70a9fa624"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "You are using the default legacy behaviour of the . If you see this, DO NOT PANIC! This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thouroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565\n",
      "Sep 19 18:39:50.634 [\u001b[1m\u001b[34mINFO\u001b[0m] Make sure you follow the LLaMA's terms of use: https://bit.ly/llama2-license for LLaMA 2, https://bit.ly/llama-license for LLaMA 1\n",
      "Sep 19 18:39:50.639 [\u001b[1m\u001b[34mINFO\u001b[0m] Using DHT prefix: StableBeluga2-hf\n",
      "Sep 19 18:40:13.920 [\u001b[1m\u001b[34mINFO\u001b[0m] Route found: 0:40 via …HfQWVM => 40:80 via …Zj98Se\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\n",
      " \"object\": \"chat.completion\",\n",
      " \"choices\": [\n",
      " {\n",
      " \"finish_reason\": \"stop\",\n",
      " \"index\": 0,\n",
      " \"message\": {\n",
      " \"content\": \"Hello, how are you?\\nI'm doing well, thank you. I'm just getting ready to go to the gym.\\nOh, that's great. I'm trying to get back into a workout routine myself.\\nYeah,\",\n",
      " \"role\": \"assistant\",\n",
      " \"logprobs\": null\n",
      " }\n",
      " }\n",
      " ],\n",
      " \"id\": \"chatcmpl-f09d79b3-c1d1-49b7-b55f-cd8dfa1043bf\",\n",
      " \"created\": 1695148897.473613,\n",
      " \"model\": \"petals-team/StableBeluga2\",\n",
      " \"usage\": {\n",
      " \"prompt_tokens\": 6,\n",
      " \"completion_tokens\": 45,\n",
      " \"total_tokens\": 51\n",
      " }\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "from litellm import completion\n",
    "\n",
    "response = completion(model=\"petals/petals-team/StableBeluga2\", messages=[{ \"content\": \"Hello, how are you?\",\"role\": \"user\"}], max_tokens=50)\n",
    "\n",
    "print(response)"
   ]
  },
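  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Optional: extra generation parameters\n",
    "\n",
    "A minimal, optional sketch: the same `completion()` call as above, with a system message and a sampling `temperature`. `max_tokens` and `temperature` are standard LiteLLM parameters, but exact pass-through to the Petals backend may vary by LiteLLM version, so treat this as a sketch rather than a tested example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional sketch: completion() with a system prompt and a temperature setting.\n",
    "# Parameter pass-through to Petals may vary by LiteLLM version.\n",
    "from litellm import completion\n",
    "\n",
    "messages = [\n",
    "    {\"role\": \"system\", \"content\": \"You are a concise assistant.\"},\n",
    "    {\"role\": \"user\", \"content\": \"Explain what Petals does in one sentence.\"},\n",
    "]\n",
    "\n",
    "response = completion(\n",
    "    model=\"petals/petals-team/StableBeluga2\",\n",
    "    messages=messages,\n",
    "    max_tokens=64,\n",
    "    temperature=0.7,\n",
    ")\n",
    "\n",
    "# The response follows the OpenAI chat-completion shape shown above.\n",
    "print(response[\"choices\"][0][\"message\"][\"content\"])"
   ]
  },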
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {
    "id": "J8DubRnHvh_j"
   },
   "source": [
    "## huggyllama/llama-65b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 538,
     "referenced_widgets": [
      "2fec5cc400424671a3d517327117d18a",
      "3687c76fe84d464baaf35366b21e83b3",
      "c29d4460dbaa441cae110b58e0014151",
      "6560449a38bf4a7bacd97ccaacf01c4c",
      "5fbd6ae281984d28ba59ebfd0279eda7",
      "323e30e275434aeea241163e5f1f9031",
      "48f4adec51c94f9da6e4c4564daeff84",
      "2a672981a44b4a7fb30674f97f4c10c6",
      "d75ae8d22ea74840b4c80c8f386384c4",
      "54c06312ecff4e7588665e8b0cb7118b",
      "300078a9d1a6483fba81a4be63793ff7"
     ]
    },
    "id": "IlTCJwDsNvgF",
    "outputId": "2e84d125-d982-48ed-8a92-6ca438a50d0c"
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sep 19 18:41:37.912 [\u001b[1m\u001b[34mINFO\u001b[0m] Make sure you follow the LLaMA's terms of use: https://bit.ly/llama2-license for LLaMA 2, https://bit.ly/llama-license for LLaMA 1\n",
      "Sep 19 18:41:37.914 [\u001b[1m\u001b[34mINFO\u001b[0m] Using DHT prefix: llama-65b-hf\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "2fec5cc400424671a3d517327117d18a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Loading checkpoint shards: 0%| | 0/2 [00:00