diff --git a/docs/my-website/docs/proxy/deploy.md b/docs/my-website/docs/proxy/deploy.md
index 20e108abf..ea8df446e 100644
--- a/docs/my-website/docs/proxy/deploy.md
+++ b/docs/my-website/docs/proxy/deploy.md
@@ -2,7 +2,7 @@ import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
import Image from '@theme/IdealImage';
-# 🐳 Docker, Deployment
+# Docker, Deployment
You can find the Dockerfile to build litellm proxy [here](https://github.com/BerriAI/litellm/blob/main/Dockerfile)
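+
+If you want to build the image yourself from that Dockerfile, a minimal sketch (the `litellm-proxy` tag here is just an illustrative name):
+
+```shell
+git clone https://github.com/BerriAI/litellm
+cd litellm
+docker build -t litellm-proxy .
+```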
diff --git a/docs/my-website/docs/proxy/docker_quick_start.md b/docs/my-website/docs/proxy/docker_quick_start.md
index 37b251e5a..1343f47b1 100644
--- a/docs/my-website/docs/proxy/docker_quick_start.md
+++ b/docs/my-website/docs/proxy/docker_quick_start.md
@@ -1,3 +1,7 @@
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
# Getting Started - E2E Tutorial
End-to-End tutorial for LiteLLM Proxy to:
@@ -9,7 +13,11 @@ End-to-End tutorial for LiteLLM Proxy to:
## Pre-Requisites
-- Install LiteLLM Docker Image
+- Install LiteLLM Docker Image **OR** LiteLLM CLI (pip package)
+
+<Tabs>
+
+<TabItem value="docker" label="Docker">
```
docker pull ghcr.io/berriai/litellm:main-latest
@@ -17,6 +25,18 @@ docker pull ghcr.io/berriai/litellm:main-latest
[**See all docker images**](https://github.com/orgs/BerriAI/packages)
+
+</TabItem>
+
+<TabItem value="pip" label="LiteLLM CLI (pip package)">
+```shell
+$ pip install 'litellm[proxy]'
+```
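+
+To sanity-check the install, you can ask the CLI for its version (a quick check; assumes the `litellm` entrypoint landed on your PATH):
+
+```shell
+$ litellm --version
+```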
+
+</TabItem>
+
+</Tabs>
+
## 1. Add a model
Control LiteLLM Proxy with a config.yaml file.
@@ -58,6 +78,11 @@ LiteLLM Proxy is 100% OpenAI-compatible. Test your azure model via the `/chat/co
Save your config.yaml from step 1. as `litellm_config.yaml`.
+
+<Tabs>
+
+<TabItem value="docker" label="Docker">
+
```bash
docker run \
-v $(pwd)/litellm_config.yaml:/app/config.yaml \
@@ -70,6 +95,20 @@ docker run \
# RUNNING on http://0.0.0.0:4000
```
+
+</TabItem>
+
+<TabItem value="pip" label="LiteLLM CLI (pip package)">
+```shell
+$ litellm --config litellm_config.yaml --detailed_debug
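+
+# RUNNING on http://0.0.0.0:4000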
+```
+
+</TabItem>
+
+</Tabs>
+
Confirm your config.yaml got mounted correctly
```bash
diff --git a/docs/my-website/docs/proxy/ip_address.md b/docs/my-website/docs/proxy/ip_address.md
index 31ffd98a4..80d5561da 100644
--- a/docs/my-website/docs/proxy/ip_address.md
+++ b/docs/my-website/docs/proxy/ip_address.md
@@ -1,5 +1,5 @@
-# ✨ IP Address Filtering
+# IP Address Filtering
:::info
diff --git a/docs/my-website/docs/proxy/virtual_keys.md b/docs/my-website/docs/proxy/virtual_keys.md
index d3e67e3ec..3b9a2a03e 100644
--- a/docs/my-website/docs/proxy/virtual_keys.md
+++ b/docs/my-website/docs/proxy/virtual_keys.md
@@ -1,7 +1,7 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-# 🔑 Virtual Keys
+# Virtual Keys
Track Spend, and control model access via virtual keys for the proxy
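+
+A minimal sketch of generating a key once the proxy is up (assumes it is running locally with master key `sk-1234`):
+
+```shell
+curl 'http://0.0.0.0:4000/key/generate' \
+--header 'Authorization: Bearer sk-1234' \
+--header 'Content-Type: application/json' \
+--data '{"models": ["gpt-3.5-turbo"]}'
+```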
:::info
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 6da9978ba..7cec9eadf 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -20,19 +20,29 @@ const sidebars = {
{ type: "doc", id: "index" }, // NEW
{
type: "category",
- label: "💥 LiteLLM Proxy Server",
+ label: "LiteLLM Proxy Server",
link: {
type: "generated-index",
- title: "💥 LiteLLM Proxy Server (LLM Gateway)",
+ title: "LiteLLM Proxy Server (LLM Gateway)",
description: `OpenAI Proxy Server (LLM Gateway) to call 100+ LLMs in a unified interface & track spend, set budgets per virtual key/user`,
slug: "/simple_proxy",
},
items: [
- "proxy/quick_start",
- "proxy/docker_quick_start",
- "proxy/deploy",
+ "proxy/docker_quick_start",
+ {
+ type: "category",
+ label: "Setup & Deployment",
+ items: [
+ "proxy/deploy",
+ "proxy/prod",
+ "proxy/configs",
+ "proxy/cli",
+ "proxy/model_management",
+ "proxy/health",
+ "proxy/debugging",
+ ],
+ },
"proxy/demo",
- "proxy/prod",
{
type: "category",
label: "Architecture",
@@ -45,17 +55,24 @@ const sidebars = {
},
"proxy/enterprise",
"proxy/user_keys",
- "proxy/configs",
"proxy/response_headers",
"proxy/reliability",
{
type: "category",
- label: "🔑 Authentication",
- items: ["proxy/virtual_keys", "proxy/token_auth", "proxy/service_accounts", "proxy/access_control","proxy/ip_address"],
+ label: "Authentication",
+ items: [
+ "proxy/virtual_keys",
+ "proxy/token_auth",
+ "proxy/service_accounts",
+ "proxy/access_control",
+ "proxy/ip_address",
+ "proxy/email",
+ "proxy/multiple_admins",
+ ],
},
{
type: "category",
- label: "💸 Spend Tracking + Budgets",
+ label: "Spend Tracking + Budgets",
items: ["proxy/cost_tracking", "proxy/users", "proxy/custom_pricing", "proxy/team_budgets", "proxy/billing", "proxy/customers"],
},
{
@@ -91,7 +108,7 @@ const sidebars = {
},
{
type: "category",
- label: "🛡️ [Beta] Guardrails",
+ label: "[Beta] Guardrails",
items: [
"proxy/guardrails/quick_start",
"proxy/guardrails/aporia_api",
@@ -114,19 +131,13 @@ const sidebars = {
},
"proxy/caching",
"proxy/pass_through",
- "proxy/email",
- "proxy/multiple_admins",
- "proxy/model_management",
- "proxy/health",
- "proxy/debugging",
"proxy/call_hooks",
- "proxy/rules",
- "proxy/cli",
+ "proxy/rules",
]
},
{
type: "category",
- label: "💯 Supported Models & Providers",
+ label: "Supported Models & Providers",
link: {
type: "generated-index",
title: "Providers",
@@ -183,7 +194,6 @@ const sidebars = {
"providers/openrouter",
"providers/palm",
"providers/sambanova",
- // "providers/custom_openai_proxy",
"providers/custom_llm_server",
"providers/petals",
@@ -191,7 +201,7 @@ const sidebars = {
},
{
type: "category",
- label: "Chat Completions (litellm.completion + PROXY)",
+ label: "Guides",
link: {
type: "generated-index",
title: "Chat Completions",
@@ -245,7 +255,7 @@ const sidebars = {
"scheduler",
{
type: "category",
- label: "🚅 LiteLLM Python SDK",
+ label: "LiteLLM Python SDK",
items: [
"set_keys",
"completion/token_usage",