mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
docs(image_handling.md): architecture doc on image handling on the proxy
This commit is contained in:
parent
9aef9fdca8
commit
f68e27a0c8
3 changed files with 22 additions and 1 deletions
21
docs/my-website/docs/proxy/image_handling.md
Normal file
21
docs/my-website/docs/proxy/image_handling.md
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
import Image from '@theme/IdealImage';
|
||||||
|
import Tabs from '@theme/Tabs';
|
||||||
|
import TabItem from '@theme/TabItem';
|
||||||
|
|
||||||
|
# Image URL Handling
|
||||||
|
|
||||||
|
<Image img={require('../../img/image_handling.png')} style={{ width: '900px', height: 'auto' }} />
|
||||||
|
|
||||||
|
Some LLM APIs don't support URLs for images, but do support base-64 strings.
|
||||||
|
|
||||||
|
For those, LiteLLM will:
|
||||||
|
|
||||||
|
1. Detect a URL being passed
|
||||||
|
2. Check if the LLM API supports a URL
|
||||||
|
3. Else, will download the image and convert it to a base64 string
|
||||||
|
4. Send the provider a base64 string.
|
||||||
|
|
||||||
|
|
||||||
|
LiteLLM also caches this result in-memory, to reduce latency for subsequent calls.
|
||||||
|
|
||||||
|
The size limit for the in-memory cache is 1MB.
|
BIN
docs/my-website/img/image_handling.png
Normal file
BIN
docs/my-website/img/image_handling.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 66 KiB |
|
@ -53,7 +53,7 @@ const sidebars = {
|
||||||
{
|
{
|
||||||
type: "category",
|
type: "category",
|
||||||
label: "Architecture",
|
label: "Architecture",
|
||||||
items: ["proxy/architecture", "proxy/db_info", "router_architecture", "proxy/user_management_heirarchy", "proxy/jwt_auth_arch"],
|
items: ["proxy/architecture", "proxy/db_info", "router_architecture", "proxy/user_management_heirarchy", "proxy/jwt_auth_arch", "proxy/image_handling"],
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
type: "link",
|
type: "link",
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue