Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-24 10:14:26 +00:00
update hf cookbook

parent c7dcbf2933
commit d4df9ae276

1 changed file with 0 additions and 153 deletions
cookbook/liteLLM_Hugging_Face_Example.ipynb (vendored): 153 deletions
@@ -1,153 +0,0 @@
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "source": [
        "## Install liteLLM https://github.com/BerriAI/litellm\n",
        "liteLLM provides one interface to call gpt 3.5, hugging face inference endpoints"
      ],
      "metadata": {
        "id": "IGQZtR61AZSd"
      }
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "x_4jcmmXcdm-",
        "outputId": "c89e7817-561d-4867-904b-aa1634565cbb"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Requirement already satisfied: litellm==0.1.362 in /usr/local/lib/python3.10/dist-packages (0.1.362)\n",
            "Requirement already satisfied: openai<0.28.0,>=0.27.8 in /usr/local/lib/python3.10/dist-packages (from litellm==0.1.362) (0.27.8)\n",
            "Requirement already satisfied: python-dotenv<2.0.0,>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from litellm==0.1.362) (1.0.0)\n",
            "Requirement already satisfied: tiktoken<0.5.0,>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from litellm==0.1.362) (0.4.0)\n",
            "Requirement already satisfied: requests>=2.20 in /usr/local/lib/python3.10/dist-packages (from openai<0.28.0,>=0.27.8->litellm==0.1.362) (2.28.2)\n",
            "Requirement already satisfied: tqdm in /usr/local/lib/python3.10/dist-packages (from openai<0.28.0,>=0.27.8->litellm==0.1.362) (4.65.0)\n",
            "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from openai<0.28.0,>=0.27.8->litellm==0.1.362) (3.8.5)\n",
            "Requirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.10/dist-packages (from tiktoken<0.5.0,>=0.4.0->litellm==0.1.362) (2022.10.31)\n",
            "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.20->openai<0.28.0,>=0.27.8->litellm==0.1.362) (3.2.0)\n",
            "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.20->openai<0.28.0,>=0.27.8->litellm==0.1.362) (3.4)\n",
            "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.20->openai<0.28.0,>=0.27.8->litellm==0.1.362) (1.26.16)\n",
            "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.20->openai<0.28.0,>=0.27.8->litellm==0.1.362) (2023.7.22)\n",
            "Requirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->openai<0.28.0,>=0.27.8->litellm==0.1.362) (23.1.0)\n",
            "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->openai<0.28.0,>=0.27.8->litellm==0.1.362) (6.0.4)\n",
            "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->openai<0.28.0,>=0.27.8->litellm==0.1.362) (4.0.2)\n",
            "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->openai<0.28.0,>=0.27.8->litellm==0.1.362) (1.9.2)\n",
            "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->openai<0.28.0,>=0.27.8->litellm==0.1.362) (1.4.0)\n",
            "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->openai<0.28.0,>=0.27.8->litellm==0.1.362) (1.3.1)\n"
          ]
        }
      ],
      "source": [
        "!pip install litellm==\"0.1.362\""
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "from litellm import completion\n",
        "import os\n",
        "user_message = \"Hello, whats the weather in San Francisco??\"\n",
        "messages = [{ \"content\": user_message,\"role\": \"user\"}]\n",
        "\n",
        "os.environ['HF_TOKEN'] = \"\"#@param\n",
        "# get your hugging face token from here:\n",
        "# https://huggingface.co/settings/tokens\n",
        "\n",
        "# Optional if you want to run OpenAI TOO\n",
        "os.environ['OPENAI_API_KEY'] = \"\" #@param\n",
        "\n",
        "response = completion(\"stabilityai/stablecode-completion-alpha-3b-4k\", messages=messages, hugging_face=True)\n",
        "print(\"Response from stabilityai/stablecode-completion-alpha-3b-4k\")\n",
        "print(response['choices'][0]['message']['content'])\n",
        "print(\"\\n\\n\")\n",
        "\n",
        "response = completion(\"bigcode/starcoder\", messages=messages, hugging_face=True)\n",
        "print(\"Response from bigcode/starcoder\")\n",
        "print(response['choices'][0]['message']['content'])\n",
        "print(\"\\n\\n\")\n",
        "\n",
        "response = completion(\"google/flan-t5-xxl\", messages=messages, hugging_face=True)\n",
        "print(\"Response from google/flan-t5-xxl\")\n",
        "print(response['choices'][0]['message']['content'])\n",
        "print(\"\\n\\n\")\n",
        "\n",
        "response = completion(\"google/flan-t5-large\", messages=messages, hugging_face=True)\n",
        "print(\"Response from google/flan-t5-large\")\n",
        "print(response['choices'][0]['message']['content'])\n",
        "print(\"\\n\\n\")\n",
        "\n",
        "response = completion(model=\"gpt-3.5-turbo\", messages=messages)\n",
        "print(response['choices'][0]['message']['content'])\n",
        "print(response)\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "vC54VW3jvLnN",
        "outputId": "e6616221-12c9-4313-dd03-fd94fa095e8e"
      },
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Response from stabilityai/stablecode-completion-alpha-3b-4k\n",
            "Hello, whats the weather in San Francisco??\",\n",
            " \"id\": 1,\n",
            " \"\n",
            "\n",
            "\n",
            "\n",
            "Response from bigcode/starcoder\n",
            "Hello, whats the weather in San Francisco??\")\n",
            "\n",
            "# print(response)\n",
            "\n",
            "# print(response.text)\n",
            "\n",
            "#\n",
            "\n",
            "\n",
            "\n",
            "Response from google/flan-t5-xxl\n",
            "a little cold\n",
            "\n",
            "\n",
            "\n",
            "Response from google/flan-t5-large\n",
            "cool\n",
            "\n",
            "\n",
            "\n",
            "I'm sorry, but I am an AI language model and do not have real-time data. However, you can check the weather in San Francisco by searching for \"San Francisco weather\" on a search engine or checking a reliable weather website or app.\n"
          ]
        }
      ]
    }
  ]
}
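
For reference, the notebook this commit deletes reduces to the short script below: a minimal sketch pinned to litellm==0.1.362, whose completion() still accepted a hugging_face=True provider flag (newer litellm releases select Hugging Face via a "huggingface/" model prefix instead).

# Minimal sketch of the deleted cookbook cell, pinned to litellm==0.1.362.
# Note: hugging_face=True is specific to that old release; newer versions
# route to Hugging Face Inference endpoints via a "huggingface/" model prefix.
import os

from litellm import completion

os.environ["HF_TOKEN"] = ""  # token from https://huggingface.co/settings/tokens
os.environ["OPENAI_API_KEY"] = ""  # only needed for the gpt-3.5-turbo call

messages = [{"role": "user", "content": "Hello, what's the weather in San Francisco?"}]

# The same completion() call, fanned out over the Hugging Face models the notebook tried.
for model in [
    "stabilityai/stablecode-completion-alpha-3b-4k",
    "bigcode/starcoder",
    "google/flan-t5-xxl",
    "google/flan-t5-large",
]:
    response = completion(model, messages=messages, hugging_face=True)
    print(f"Response from {model}")
    print(response["choices"][0]["message"]["content"], end="\n\n\n")

# The same interface reaches OpenAI; only the model argument changes.
response = completion(model="gpt-3.5-turbo", messages=messages)
print(response["choices"][0]["message"]["content"])

As the captured output shows, the code-completion models mostly echo the prompt; only the flan-t5 models and gpt-3.5-turbo return a real answer.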