From 408b9a4e22df55052a5ac1777c9eebcefefb8ba9 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 15 Aug 2023 15:28:34 -0700 Subject: [PATCH] fix docs hierarchy --- cookbook/TogetherAI_liteLLM.ipynb | 111 ++++++++++--------- docs/my-website/docs/completion/supported.md | 2 +- 2 files changed, 58 insertions(+), 55 deletions(-) diff --git a/cookbook/TogetherAI_liteLLM.ipynb b/cookbook/TogetherAI_liteLLM.ipynb index 419bbfdce..b81659576 100644 --- a/cookbook/TogetherAI_liteLLM.ipynb +++ b/cookbook/TogetherAI_liteLLM.ipynb @@ -1,72 +1,56 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, "cells": [ { + "attachments": {}, "cell_type": "markdown", + "metadata": { + "id": "WemkFEdDAnJL" + }, "source": [ "## liteLLM Together AI Tutorial\n", "https://together.ai/\n" - ], - "metadata": { - "id": "WemkFEdDAnJL" - } + ] }, { "cell_type": "code", - "source": [ - "!pip install litellm==0.1.371" - ], + "execution_count": null, "metadata": { "id": "pc6IO4V99O25" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!pip install litellm==0.1.371" + ] }, { "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "TMI3739_9q97" + }, + "outputs": [], "source": [ "import os\n", "from litellm import completion\n", "os.environ[\"TOGETHER_AI_TOKEN\"] = \"\" #@param\n", "user_message = \"Hello, whats the weather in San Francisco??\"\n", "messages = [{ \"content\": user_message,\"role\": \"user\"}]" - ], - "metadata": { - "id": "TMI3739_9q97" - }, - "execution_count": 5, - "outputs": [] + ] }, { + "attachments": {}, "cell_type": "markdown", + "metadata": { + "id": "bEqJ2HHjBJqq" + }, "source": [ "## Calling togethercomputer/llama-2-70b-chat\n", "https://api.together.xyz/playground/chat?model=togethercomputer%2Fllama-2-70b-chat" - ], - "metadata": { - "id": "bEqJ2HHjBJqq" - } + ] }, { "cell_type": "code", - "source": [ - "model_name = \"togethercomputer/llama-2-70b-chat\"\n", - "response = completion(model=model_name, messages=messages, together_ai=True)\n", - "print(response)" - ], + "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -74,34 +58,34 @@ "id": "Jrrt8puj523f", "outputId": "5a5b5beb-cda3-413e-8e83-4423d392cb44" }, - "execution_count": 6, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "{'choices': [{'finish_reason': 'stop', 'index': 0, 'message': {'role': 'assistant', 'content': \"\\n\\nI'm not able to provide real-time weather information. 
However, I can suggest\"}}], 'created': 1691629657.9288375, 'model': 'togethercomputer/llama-2-70b-chat', 'usage': {'prompt_tokens': 9, 'completion_tokens': 17, 'total_tokens': 26}}\n" ] } + ], + "source": [ + "model_name = \"togethercomputer/llama-2-70b-chat\"\n", + "response = completion(model=model_name, messages=messages, custom_llm_provider=\"together_ai\")\n", + "print(response)" ] }, { + "attachments": {}, "cell_type": "markdown", - "source": [ - "## With Streaming" - ], "metadata": { "id": "sfWtgf-mBQcM" - } + }, + "source": [ + "## With Streaming" + ] }, { "cell_type": "code", - "source": [ - "response = completion(model=model_name, messages=messages, together_ai=True, stream=True)\n", - "print(response)\n", - "for chunk in response:\n", - " print(chunk['choices'][0]['delta']) # same as openai format" - ], + "execution_count": 8, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -109,11 +93,10 @@ "id": "wuBhlZtC6MH5", "outputId": "fcb82177-6494-4963-8e37-8716d3b9e616" }, - "execution_count": 8, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "\n", "{'role': 'assistant', 'content': '\\\\n'}\n", @@ -136,7 +119,27 @@ "{'role': 'assistant', 'content': ' can'}\n" ] } + ], + "source": [ + "response = completion(model=model_name, messages=messages, stream=True, custom_llm_provider=\"together_ai\")\n", + "print(response)\n", + "for chunk in response:\n", + " print(chunk['choices'][0]['delta']) # same as openai format" ] } - ] -} \ No newline at end of file + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/docs/my-website/docs/completion/supported.md b/docs/my-website/docs/completion/supported.md index f91b5c0eb..7ae64024a 100644 --- a/docs/my-website/docs/completion/supported.md +++ b/docs/my-website/docs/completion/supported.md @@ -9,7 +9,7 @@ | gpt-3.5-turbo-16k-0613 | `completion('gpt-3.5-turbo-16k-0613', messages)` | `os.environ['OPENAI_API_KEY']` | | gpt-4 | `completion('gpt-4', messages)` | `os.environ['OPENAI_API_KEY']` | -## Azure OpenAI Chat Completion Models +### Azure OpenAI Chat Completion Models | Model Name | Function Call | Required OS Variables | |------------------|-----------------------------------------|-------------------------------------------|
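
---

For reference, below is a minimal standalone sketch of the call pattern this patch migrates the notebook to: provider selection moves from the old boolean `together_ai=True` flag to `custom_llm_provider="together_ai"`. It assumes the pinned `litellm==0.1.371` from the notebook's install cell, a valid key in `TOGETHER_AI_TOKEN`, and the OpenAI-style response dicts shown in the notebook's captured output; it is not an authoritative litellm reference.

```python
# Sketch of the updated litellm call pattern from this patch.
# Assumes litellm==0.1.371 and a Together AI key in TOGETHER_AI_TOKEN,
# matching the notebook cells shown in the diff above.
import os

from litellm import completion

os.environ["TOGETHER_AI_TOKEN"] = ""  # set your Together AI API key here

messages = [{"role": "user", "content": "Hello, what's the weather in San Francisco?"}]

# Non-streaming call: provider routing now goes through custom_llm_provider
# instead of the removed together_ai=True flag.
response = completion(
    model="togethercomputer/llama-2-70b-chat",
    messages=messages,
    custom_llm_provider="together_ai",
)
# Response indexing follows the OpenAI-style dict in the notebook output.
print(response["choices"][0]["message"]["content"])

# Streaming call: chunks arrive in the OpenAI delta format, as the
# notebook's "With Streaming" section demonstrates.
stream = completion(
    model="togethercomputer/llama-2-70b-chat",
    messages=messages,
    custom_llm_provider="together_ai",
    stream=True,
)
for chunk in stream:
    print(chunk["choices"][0]["delta"])
```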