commit d9b9cbdc7e
parent 8a21e75700
Author: ishaan-jaff
Date:   2023-09-20 21:43:03 -07:00

cookbook/LiteLLM_OpenRouter.ipynb (new file, 179 lines)

@@ -0,0 +1,179 @@
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
}
},
"cells": [
{
"cell_type": "markdown",
"source": [
"# LiteLLM OpenRouter Cookbook"
],
"metadata": {
"id": "iFEmsVJI_2BR"
}
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "cBlUhCEP_xj4"
},
"outputs": [],
"source": [
"!pip install litellm"
]
},
{
"cell_type": "code",
"source": [
"import os\n",
"\n",
"os.environ['OPENROUTER_API_KEY'] = \"\""
],
"metadata": {
"id": "p-MQqWOT_1a7"
},
"execution_count": 14,
"outputs": []
},
{
"cell_type": "code",
"source": [
"from litellm import completion\n",
"response = completion(\n",
" model=\"openrouter/google/palm-2-chat-bison\",\n",
" messages=[{\"role\": \"user\", \"content\": \"write code for saying hi\"}]\n",
")\n",
"response"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Ze8JqMqWAARO",
"outputId": "64f3e836-69fa-4f8e-fb35-088a913bbe98"
},
"execution_count": 11,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"<OpenAIObject id=gen-W8FTMSIEorCp3vG5iYIgNMR4IeBv at 0x7c3dcef1f060> JSON: {\n",
" \"id\": \"gen-W8FTMSIEorCp3vG5iYIgNMR4IeBv\",\n",
" \"model\": \"chat-bison@001\",\n",
" \"choices\": [\n",
" {\n",
" \"message\": {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"```\\n#include <stdio.h>\\n\\nint main() {\\n printf(\\\"Hi!\\\\n\\\");\\n return 0;\\n}\\n```\"\n",
" }\n",
" }\n",
" ],\n",
" \"response_ms\": 7817.777999999999\n",
"}"
]
},
"metadata": {},
"execution_count": 11
}
]
},
{
"cell_type": "code",
"source": [
"response = completion(\n",
" model=\"openrouter/anthropic/claude-2\",\n",
" messages=[{\"role\": \"user\", \"content\": \"write code for saying hi\"}]\n",
")\n",
"response"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "-LnhELrnAM_J",
"outputId": "d51c7ab7-d761-4bd1-f849-1534d9df4cd0"
},
"execution_count": 12,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"<OpenAIObject id=gen-IiuV7ZNimDufVeutBHrl8ajPuzEh at 0x7c3dcea67560> JSON: {\n",
" \"choices\": [\n",
" {\n",
" \"message\": {\n",
" \"role\": \"assistant\",\n",
" \"content\": \" Here is some simple code to print \\\"Hi\\\":\\n\\n```python\\nprint(\\\"Hi\\\")\\n```\\n\\nThis uses the print() function in Python to output the text \\\"Hi\\\".\"\n",
" },\n",
" \"finish_reason\": \"stop_sequence\"\n",
" }\n",
" ],\n",
" \"model\": \"claude-2.0\",\n",
" \"id\": \"gen-IiuV7ZNimDufVeutBHrl8ajPuzEh\",\n",
" \"response_ms\": 8112.443000000001\n",
"}"
]
},
"metadata": {},
"execution_count": 12
}
]
},
{
"cell_type": "code",
"source": [
"response = completion(\n",
" model=\"openrouter/meta-llama/llama-2-70b-chat\",\n",
" messages=[{\"role\": \"user\", \"content\": \"write code for saying hi\"}]\n",
")\n",
"response"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "dJBOUYdwCEn1",
"outputId": "ffa18679-ec15-4dad-fe2b-68665cdf36b0"
},
"execution_count": 13,
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"<OpenAIObject id=gen-PyMd3yyJ0aQsCgIY9R8XGZoAtPbl at 0x7c3dceefcae0> JSON: {\n",
" \"id\": \"gen-PyMd3yyJ0aQsCgIY9R8XGZoAtPbl\",\n",
" \"model\": \"togethercomputer/llama-2-70b-chat\",\n",
" \"choices\": [\n",
" {\n",
" \"message\": {\n",
" \"role\": \"assistant\",\n",
" \"content\": \"*gives a sly smile as they type*\\n\\nHey there, handsome. \\ud83d\\ude0f\\n\\nWhat brings you to my neck of the woods today? \\ud83d\\ude18\"\n",
" }\n",
" }\n",
" ],\n",
" \"response_ms\": 9618.775\n",
"}"
]
},
"metadata": {},
"execution_count": 13
}
]
}
]
}