From 65d978622db6378f1dc7fbf06dc6900ac3b6dae0 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 23 Nov 2023 11:23:04 -0800
Subject: [PATCH] (docs) proxy

---
 docs/my-website/docs/simple_proxy.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/docs/my-website/docs/simple_proxy.md b/docs/my-website/docs/simple_proxy.md
index 364b2241ab..aa699b30e0 100644
--- a/docs/my-website/docs/simple_proxy.md
+++ b/docs/my-website/docs/simple_proxy.md
@@ -7,8 +7,7 @@ import TabItem from '@theme/TabItem';
 LiteLLM Server manages:
 
 * Calling 100+ LLMs [Huggingface/Bedrock/TogetherAI/etc.](#other-supported-models) in the OpenAI `ChatCompletions` & `Completions` format
-* Authentication - [Virtual Keys](#managing-auth---virtual-keys)
-* Set custom prompt templates + model-specific configs (`temperature`, `max_tokens`, etc.)
+* Authentication & Spend Tracking [Virtual Keys](#managing-auth---virtual-keys)
 * Load balancing - Routing between [Multiple Models](#multiple-models---quick-start) + [Deployments of the same model](#multiple-instances-of-1-model)
 
 [**See LiteLLM Proxy code**](https://github.com/BerriAI/litellm/tree/main/litellm/proxy)
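
For context on the feature list this patch edits: a client speaks the OpenAI `ChatCompletions` format to the LiteLLM proxy, authenticating with a virtual key, and the proxy handles routing to the underlying LLMs. A minimal sketch of such a client call, assuming the proxy is running locally on port 8000 with virtual key `sk-1234` and model alias `gpt-3.5-turbo` (all three are illustrative placeholders, not values from this patch):

```python
# Minimal sketch: calling a locally running LiteLLM proxy through the
# OpenAI Python client (openai>=1.0). The base_url, api_key, and model
# alias below are illustrative placeholders, not values from this patch.
from openai import OpenAI

client = OpenAI(
    api_key="sk-1234",               # assumed: a virtual key issued by the proxy
    base_url="http://0.0.0.0:8000",  # assumed: address of the local proxy
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",           # assumed: a model alias configured on the proxy
    messages=[{"role": "user", "content": "Hello from the proxy!"}],
)
print(response.choices[0].message.content)
```

Because the proxy is OpenAI-compatible, any OpenAI-format client works unchanged once `base_url` points at the proxy; the virtual key passed as `api_key` is what ties the request to the auth and spend tracking the updated bullet describes.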