diff --git a/docs/my-website/docs/observability/humanloop.md b/docs/my-website/docs/observability/humanloop.md
new file mode 100644
index 0000000000..2c73699cb3
--- /dev/null
+++ b/docs/my-website/docs/observability/humanloop.md
@@ -0,0 +1,176 @@
+import Image from '@theme/IdealImage';
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Humanloop
+
+[Humanloop](https://humanloop.com/docs/v5/getting-started/overview) enables product teams to build robust AI features with LLMs, using best-in-class tooling for Evaluation, Prompt Management, and Observability.
+
+
+## Getting Started
+
+Use Humanloop to manage prompts across all LiteLLM Providers.
+
+<Tabs>
+<TabItem value="sdk" label="SDK">
+
+```python
+import os
+import litellm
+
+os.environ["HUMANLOOP_API_KEY"] = "" # [OPTIONAL] set here or in `.completion`
+
+litellm.set_verbose = True # see raw request to provider
+
+resp = litellm.completion(
+    model="humanloop/gpt-3.5-turbo",
+    prompt_id="test-chat-prompt",
+    prompt_variables={"user_message": "this is used"}, # [OPTIONAL]
+    messages=[{"role": "user", "content": ""}], # ignored when the Humanloop prompt template is found
+    # humanloop_api_key="..." ## alternative to setting env var
+)
+```
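+
+In the example above, `prompt_variables` fill the `{{...}}` placeholders in the Humanloop prompt template, and the literal `messages` content is ignored once a template is found. A sketch of the substitution, using a hypothetical template (LiteLLM performs this for you):
+
+```python
+# Hypothetical Humanloop template for "test-chat-prompt"
+template = [{"role": "user", "content": "{{user_message}}"}]
+prompt_variables = {"user_message": "this is used"}
+
+def render(content: str) -> str:
+    # Replace each {{variable}} placeholder with its value
+    for name, value in prompt_variables.items():
+        content = content.replace("{{" + name + "}}", value)
+    return content
+
+rendered = [{**msg, "content": render(msg["content"])} for msg in template]
+print(rendered) # [{'role': 'user', 'content': 'this is used'}]
+```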
+
+</TabItem>
+<TabItem value="proxy" label="PROXY">
+
+1. Setup config.yaml
+
+```yaml
+model_list:
+  - model_name: gpt-3.5-turbo
+    litellm_params:
+      model: humanloop/gpt-3.5-turbo
+      prompt_id: ""
+      api_key: os.environ/OPENAI_API_KEY
+```
+
+2. Start the proxy
+
+```bash
+litellm --config config.yaml --detailed_debug
+```
+
+3. Test it!
+
+<Tabs>
+<TabItem value="curl" label="CURL">
+
+```bash
+curl -L -X POST 'http://0.0.0.0:4000/v1/chat/completions' \
+-H 'Content-Type: application/json' \
+-H 'Authorization: Bearer sk-1234' \
+-d '{
+ "model": "gpt-3.5-turbo",
+ "messages": [
+ {
+ "role": "user",
+ "content": "THIS WILL BE IGNORED"
+ }
+ ],
+ "prompt_variables": {
+ "key": "this is used"
+ }
+}'
+```
+
+</TabItem>
+<TabItem value="openai" label="OpenAI Python SDK">
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[
+        {
+            "role": "user",
+            "content": "this is a test request, write a short poem"
+        }
+    ],
+    extra_body={
+        "prompt_variables": { # [OPTIONAL]
+            "key": "this is used"
+        }
+    }
+)
+
+print(response)
+```
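+
+Note: the OpenAI SDK has no `prompt_variables` parameter of its own, so it is passed via `extra_body`, which the SDK merges into the JSON body of the request sent to the proxy.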
+
+</TabItem>
+</Tabs>
+
+</TabItem>
+</Tabs>
+
+**Expected Logs:**
+
+```
+POST Request Sent from LiteLLM:
+curl -X POST \
+https://api.openai.com/v1/ \
+-d '{'model': 'gpt-3.5-turbo', 'messages': <your-messages>}'
+```
+
+## How to set model
+
+### Set the model on LiteLLM
+
+Prefix the underlying model name with `humanloop/`:
+
+<Tabs>
+<TabItem value="sdk" label="SDK">
+
+```python
+litellm.completion(
+    model="humanloop/gpt-3.5-turbo", # or `humanloop/anthropic/claude-3-5-sonnet`
+    ...
+)
+```
+
+</TabItem>
+<TabItem value="proxy" label="PROXY">
+
+```yaml
+model_list:
+  - model_name: gpt-3.5-turbo
+    litellm_params:
+      model: humanloop/gpt-3.5-turbo # OR humanloop/anthropic/claude-3-5-sonnet
+      prompt_id: ""
+      api_key: os.environ/OPENAI_API_KEY
+```
+
+</TabItem>
+</Tabs>
+
+### Set the model on Humanloop
+
+LiteLLM calls Humanloop's `https://api.humanloop.com/v5/prompts/` endpoint to fetch the prompt template.
+
+The response also includes the model configured on the Humanloop template.
+
+```json
+{
+    "template": [
+        {
+            ... # your prompt template
+        }
+    ],
+    "model": "gpt-3.5-turbo" # your template model
+}
+```
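+
+To inspect the template LiteLLM will receive, you can call the same endpoint directly. A minimal sketch, assuming the v5 API authenticates via an `X-API-KEY` header (check Humanloop's API reference):
+
+```python
+import os
+import requests
+
+prompt_id = "test-chat-prompt" # hypothetical prompt id from the examples above
+
+resp = requests.get(
+    f"https://api.humanloop.com/v5/prompts/{prompt_id}",
+    headers={"X-API-KEY": os.environ["HUMANLOOP_API_KEY"]},
+)
+resp.raise_for_status()
+prompt = resp.json()
+
+print(prompt["model"])    # template model set on Humanloop
+print(prompt["template"]) # the prompt template itself
+```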
+
diff --git a/docs/my-website/docs/proxy/prompt_management.md b/docs/my-website/docs/proxy/prompt_management.md
index 328a73b8eb..34902f3d0b 100644
--- a/docs/my-website/docs/proxy/prompt_management.md
+++ b/docs/my-website/docs/proxy/prompt_management.md
@@ -8,6 +8,7 @@ Run experiments or change the specific model (e.g. from gpt-4o to gpt4o-mini fin
Supported Integrations:
- [Langfuse](https://langfuse.com/docs/prompts/get-started)
+- [Humanloop](../observability/humanloop)
## Quick Start
@@ -173,7 +174,6 @@ model_list:
- `prompt_variables`: A dictionary of variables that will be used to replace parts of the prompt.
-
## What is 'prompt_id'?
- `prompt_id`: The ID of the prompt that will be used for the request.
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index a3d1c602ed..bead129921 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -390,6 +390,7 @@ const sidebars = {
"debugging/local_debugging",
"observability/raw_request_response",
"observability/custom_callback",
+ "observability/humanloop",
"observability/scrub_data",
"observability/braintrust",
"observability/sentry",