diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 8c8c87fb8..beae3544f 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -128,6 +128,7 @@ const sidebars = {
         "providers/anthropic",
         "providers/aws_sagemaker",
         "providers/bedrock",
+        "providers/litellm_proxy",
         "providers/mistral",
         "providers/codestral",
         "providers/cohere",
diff --git a/docs/my-website/docs/providers/litellm_proxy.md b/docs/my-website/docs/providers/litellm_proxy.md
new file mode 100644
index 000000000..de026967f
--- /dev/null
+++ b/docs/my-website/docs/providers/litellm_proxy.md
@@ -0,0 +1,90 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# LiteLLM Proxy (LLM Gateway)
+
+**[LiteLLM Proxy](../simple_proxy) is OpenAI-compatible**; you just need the `openai/` prefix before the model name.
+
+:::tip
+
+[LiteLLM provides a **self-hosted** proxy server (AI Gateway)](../simple_proxy) to call all your LLMs in the OpenAI format.
+
+:::
+
+## Required Variables
+
+```python
+import os
+
+os.environ["OPENAI_API_KEY"] = ""   # e.g. "sk-1234", your LiteLLM Proxy API key
+os.environ["OPENAI_API_BASE"] = ""  # e.g. "http://localhost:4000", your LiteLLM Proxy API base
+```
+
+## Usage (Non-Streaming)
+
+```python
+import os
+import litellm
+from litellm import completion
+
+os.environ["OPENAI_API_KEY"] = ""
+
+# set the custom api base to your proxy,
+# either via the OPENAI_API_BASE env var or litellm.api_base
+# os.environ["OPENAI_API_BASE"] = ""
+litellm.api_base = "your-litellm-proxy-url"
+
+messages = [{"content": "Hello, how are you?", "role": "user"}]
+
+# openai call
+response = completion(model="openai/your-model-name", messages=messages)
+```
+
+## Usage - passing `api_base`, `api_key` per request
+
+If you need to set `api_base` dynamically, pass it in the completion call instead: `completion(..., api_base="your-proxy-api-base")`.
+
+```python
+import os
+from litellm import completion
+
+os.environ["OPENAI_API_KEY"] = ""
+
+messages = [{"content": "Hello, how are you?", "role": "user"}]
+
+# openai call
+response = completion(
+    model="openai/your-model-name",
+    messages=messages,
+    api_base="your-litellm-proxy-url",
+    api_key="your-litellm-proxy-api-key"
+)
+```
+
+## Usage - Streaming
+
+```python
+import os
+from litellm import completion
+
+os.environ["OPENAI_API_KEY"] = ""
+
+messages = [{"content": "Hello, how are you?", "role": "user"}]
+
+# openai call
+response = completion(
+    model="openai/your-model-name",
+    messages=messages,
+    api_base="your-litellm-proxy-url",
+    stream=True
+)
+
+for chunk in response:
+    print(chunk)
+```
+
+## **Usage with Langchain, LlamaIndex, OpenAI JS, Anthropic SDK, Instructor**
+
+[Follow this doc to see how to use LiteLLM Proxy with Langchain, LlamaIndex, the Anthropic SDK, and more](../proxy/user_keys). A minimal OpenAI Python SDK sketch is shown below.
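+
+## Usage with the OpenAI Python SDK
+
+Because the proxy exposes an OpenAI-compatible endpoint, the official `openai` Python SDK (v1+) can be pointed at it directly. A minimal sketch, assuming a proxy running at `http://localhost:4000` with a virtual key `sk-1234` and a model named `your-model-name` configured on the proxy:
+
+```python
+import openai
+
+# Point the official OpenAI client at the LiteLLM Proxy.
+# The api_key and base_url values below are placeholders for your deployment.
+client = openai.OpenAI(
+    api_key="sk-1234",                # your LiteLLM Proxy virtual key
+    base_url="http://localhost:4000"  # your LiteLLM Proxy URL
+)
+
+response = client.chat.completions.create(
+    model="your-model-name",  # a model name configured on the proxy
+    messages=[{"role": "user", "content": "Hello, how are you?"}]
+)
+print(response.choices[0].message.content)
+```
+
+Any OpenAI-compatible client can be wired up the same way; the doc linked above covers Langchain, LlamaIndex, and the other SDKs in detail.
\ No newline at end of file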