Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
(code quality) run ruff rule to ban unused imports (#7313)
* remove unused imports
* fix AmazonConverseConfig
* fix test
* fix import
* ruff check fixes
* test fixes
* fix testing
* fix imports
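For reference, "ban unused imports" corresponds to Ruff rule F401 (unused-import). The exact command and configuration used in this PR are not shown on this page, but a typical invocation is `ruff check --select F401 --fix`, whose autofix simply deletes the flagged import lines. A minimal sketch of the effect, using a hypothetical snippet shaped like the cookbook change below:

# hypothetical before.py: `litellm` itself is never referenced, so F401 flags it
import litellm
import os
from litellm import batch_completion

os.environ['ANTHROPIC_API_KEY'] = ""   # `os` is used, so its import stays
print(batch_completion)                # `batch_completion` is used, so its import stays
# `ruff check --select F401 --fix before.py` removes only the `import litellm` line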
This commit is contained in:
parent 5e344497ce
commit c7f14e936a
347 changed files with 5473 additions and 7207 deletions
315  cookbook/LiteLLM_batch_completion.ipynb (vendored)
@@ -1,166 +1,163 @@
 {
-  "cells": [
-    {
-      "cell_type": "markdown",
-      "source": [
-        "# LiteLLM Batch Completions Example\n",
-        "\n",
-        "* This tutorial walks through using `batch_completion`\n",
-        "* Docs: https://docs.litellm.ai/docs/completion/batching"
-      ],
-      "metadata": {
-        "id": "MbLbs1tbISk-"
-      }
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "Ty6-ko_aDlPF"
-      },
-      "outputs": [],
-      "source": [
-        "!pip install litellm"
-      ]
-    },
-    {
-      "cell_type": "markdown",
-      "source": [
-        "## Import Batch Completion"
-      ],
-      "metadata": {
-        "id": "KGhNJRUCIh1j"
-      }
-    },
-    {
-      "cell_type": "code",
-      "source": [
-        "import litellm\n",
-        "import os\n",
-        "from litellm import batch_completion\n",
-        "\n",
-        "# set your API_KEY\n",
-        "os.environ['ANTHROPIC_API_KEY'] = \"\""
-      ],
-      "metadata": {
-        "id": "LOtI43snDrSK"
-      },
-      "execution_count": 7,
-      "outputs": []
-    },
-    {
-      "cell_type": "markdown",
-      "source": [
-        "## Calling `litellm.batch_completion`\n",
-        "\n",
-        "In the batch_completion method, you provide a list of messages where each sub-list of messages is passed to litellm.completion(), allowing you to process multiple prompts efficiently in a single API call."
-      ],
-      "metadata": {
-        "id": "Xhv92NBaIpaw"
-      }
-    },
-    {
-      "cell_type": "code",
-      "source": [
-        "import litellm\n",
-        "import os\n",
-        "from litellm import batch_completion\n",
-        "\n",
-        "os.environ['ANTHROPIC_API_KEY'] = \"\"\n",
-        "\n",
-        "\n",
-        "responses = batch_completion(\n",
-        " model=\"claude-2\",\n",
-        " messages = [\n",
-        " [\n",
-        " {\n",
-        " \"role\": \"user\",\n",
-        " \"content\": \"good morning? \"\n",
-        " }\n",
-        " ],\n",
-        " [\n",
-        " {\n",
-        " \"role\": \"user\",\n",
-        " \"content\": \"what's the time? \"\n",
-        " }\n",
-        " ]\n",
-        " ]\n",
-        ")\n",
-        "responses"
-      ],
-      "metadata": {
-        "colab": {
-          "base_uri": "https://localhost:8080/"
-        },
-        "id": "yY7GIRLsDywu",
-        "outputId": "009ea67f-95d5-462b-947f-b0d21e60c5bb"
-      },
-      "execution_count": 11,
-      "outputs": [
-        {
-          "output_type": "execute_result",
-          "data": {
-            "text/plain": [
-              "[<ModelResponse at 0x7a164eed4450> JSON: {\n",
-              " \"choices\": [\n",
-              " {\n",
-              " \"finish_reason\": \"stop\",\n",
-              " \"index\": 0,\n",
-              " \"message\": {\n",
-              " \"content\": \" Good morning!\",\n",
-              " \"role\": \"assistant\",\n",
-              " \"logprobs\": null\n",
-              " }\n",
-              " }\n",
-              " ],\n",
-              " \"created\": 1694030351.309254,\n",
-              " \"model\": \"claude-2\",\n",
-              " \"usage\": {\n",
-              " \"prompt_tokens\": 11,\n",
-              " \"completion_tokens\": 3,\n",
-              " \"total_tokens\": 14\n",
-              " }\n",
-              " },\n",
-              " <ModelResponse at 0x7a164eed5800> JSON: {\n",
-              " \"choices\": [\n",
-              " {\n",
-              " \"finish_reason\": \"stop\",\n",
-              " \"index\": 0,\n",
-              " \"message\": {\n",
-              " \"content\": \" I'm an AI assistant created by Anthropic. I don't actually have a concept of the current time.\",\n",
-              " \"role\": \"assistant\",\n",
-              " \"logprobs\": null\n",
-              " }\n",
-              " }\n",
-              " ],\n",
-              " \"created\": 1694030352.1215081,\n",
-              " \"model\": \"claude-2\",\n",
-              " \"usage\": {\n",
-              " \"prompt_tokens\": 13,\n",
-              " \"completion_tokens\": 22,\n",
-              " \"total_tokens\": 35\n",
-              " }\n",
-              " }]"
-            ]
-          },
-          "metadata": {},
-          "execution_count": 11
-        }
-      ]
-    }
-  ],
-  "metadata": {
-    "colab": {
-      "provenance": []
-    },
-    "kernelspec": {
-      "display_name": "Python 3",
-      "name": "python3"
-    },
-    "language_info": {
-      "name": "python"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 0
+  "nbformat": 4,
+  "nbformat_minor": 0,
+  "metadata": {
+    "colab": {
+      "provenance": []
+    },
+    "kernelspec": {
+      "name": "python3",
+      "display_name": "Python 3"
+    },
+    "language_info": {
+      "name": "python"
+    }
+  },
+  "cells": [
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "MbLbs1tbISk-"
+      },
+      "source": [
+        "# LiteLLM Batch Completions Example\n",
+        "\n",
+        "* This tutorial walks through using `batch_completion`\n",
+        "* Docs: https://docs.litellm.ai/docs/completion/batching"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": null,
+      "metadata": {
+        "id": "Ty6-ko_aDlPF"
+      },
+      "outputs": [],
+      "source": [
+        "!pip install litellm"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "KGhNJRUCIh1j"
+      },
+      "source": [
+        "## Import Batch Completion"
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 7,
+      "metadata": {
+        "id": "LOtI43snDrSK"
+      },
+      "outputs": [],
+      "source": [
+        "import os\n",
+        "from litellm import batch_completion\n",
+        "\n",
+        "# set your API_KEY\n",
+        "os.environ['ANTHROPIC_API_KEY'] = \"\""
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Xhv92NBaIpaw"
+      },
+      "source": [
+        "## Calling `litellm.batch_completion`\n",
+        "\n",
+        "In the batch_completion method, you provide a list of messages where each sub-list of messages is passed to litellm.completion(), allowing you to process multiple prompts efficiently in a single API call."
+      ]
+    },
+    {
+      "cell_type": "code",
+      "execution_count": 11,
+      "metadata": {
+        "colab": {
+          "base_uri": "https://localhost:8080/"
+        },
+        "id": "yY7GIRLsDywu",
+        "outputId": "009ea67f-95d5-462b-947f-b0d21e60c5bb"
+      },
+      "outputs": [
+        {
+          "data": {
+            "text/plain": [
+              "[<ModelResponse at 0x7a164eed4450> JSON: {\n",
+              " \"choices\": [\n",
+              " {\n",
+              " \"finish_reason\": \"stop\",\n",
+              " \"index\": 0,\n",
+              " \"message\": {\n",
+              " \"content\": \" Good morning!\",\n",
+              " \"role\": \"assistant\",\n",
+              " \"logprobs\": null\n",
+              " }\n",
+              " }\n",
+              " ],\n",
+              " \"created\": 1694030351.309254,\n",
+              " \"model\": \"claude-2\",\n",
+              " \"usage\": {\n",
+              " \"prompt_tokens\": 11,\n",
+              " \"completion_tokens\": 3,\n",
+              " \"total_tokens\": 14\n",
+              " }\n",
+              " },\n",
+              " <ModelResponse at 0x7a164eed5800> JSON: {\n",
+              " \"choices\": [\n",
+              " {\n",
+              " \"finish_reason\": \"stop\",\n",
+              " \"index\": 0,\n",
+              " \"message\": {\n",
+              " \"content\": \" I'm an AI assistant created by Anthropic. I don't actually have a concept of the current time.\",\n",
+              " \"role\": \"assistant\",\n",
+              " \"logprobs\": null\n",
+              " }\n",
+              " }\n",
+              " ],\n",
+              " \"created\": 1694030352.1215081,\n",
+              " \"model\": \"claude-2\",\n",
+              " \"usage\": {\n",
+              " \"prompt_tokens\": 13,\n",
+              " \"completion_tokens\": 22,\n",
+              " \"total_tokens\": 35\n",
+              " }\n",
+              " }]"
+            ]
+          },
+          "execution_count": 11,
+          "metadata": {},
+          "output_type": "execute_result"
+        }
+      ],
+      "source": [
+        "import os\n",
+        "\n",
+        "os.environ['ANTHROPIC_API_KEY'] = \"\"\n",
+        "\n",
+        "\n",
+        "responses = batch_completion(\n",
+        " model=\"claude-2\",\n",
+        " messages = [\n",
+        " [\n",
+        " {\n",
+        " \"role\": \"user\",\n",
+        " \"content\": \"good morning? \"\n",
+        " }\n",
+        " ],\n",
+        " [\n",
+        " {\n",
+        " \"role\": \"user\",\n",
+        " \"content\": \"what's the time? \"\n",
+        " }\n",
+        " ]\n",
+        " ]\n",
+        ")\n",
+        "responses"
+      ]
+    }
+  ]
 }
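For readers skimming the diff: after this commit the notebook reduces to the following standalone script (a sketch that assumes the litellm API exactly as used in the cells above; the empty key is a placeholder to fill in):

import os

from litellm import batch_completion

os.environ['ANTHROPIC_API_KEY'] = ""  # placeholder: supply a real Anthropic key

# Each inner list is one independent conversation; batch_completion fans each
# one out to litellm.completion() and returns one ModelResponse per prompt.
responses = batch_completion(
    model="claude-2",
    messages=[
        [{"role": "user", "content": "good morning? "}],
        [{"role": "user", "content": "what's the time? "}],
    ],
)
for response in responses:
    print(response.choices[0].message.content)  # OpenAI-style response shape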