diff --git a/docs/my-website/docs/observability/callbacks.md b/docs/my-website/docs/observability/callbacks.md
index 4f9e91944..5a01864dd 100644
--- a/docs/my-website/docs/observability/callbacks.md
+++ b/docs/my-website/docs/observability/callbacks.md
@@ -8,6 +8,7 @@ liteLLM supports:
- [LLMonitor](https://llmonitor.com/docs)
- [Helicone](https://docs.helicone.ai/introduction)
+- [Traceloop](https://traceloop.com/docs)
- [Sentry](https://docs.sentry.io/platforms/python/)
- [PostHog](https://posthog.com/docs/libraries/python)
- [Slack](https://slack.dev/bolt-python/concepts)
@@ -25,6 +26,7 @@ litellm.failure_callback=["sentry", "llmonitor"]
os.environ['SENTRY_API_URL'], os.environ['SENTRY_API_TRACE_RATE']= ""
os.environ['POSTHOG_API_KEY'], os.environ['POSTHOG_API_URL'] = "api-key", "api-url"
os.environ["HELICONE_API_KEY"] = ""
+os.environ["TRACELOOP_API_KEY"] = ""
os.environ["LLMONITOR_APP_ID"] = ""
response = completion(model="gpt-3.5-turbo", messages=messages)
diff --git a/docs/my-website/docs/observability/integrations.md b/docs/my-website/docs/observability/integrations.md
index 3f4c8616d..b5d27ccfc 100644
--- a/docs/my-website/docs/observability/integrations.md
+++ b/docs/my-website/docs/observability/integrations.md
@@ -7,4 +7,5 @@
| Sentry | `SENTRY_API_URL` | `litellm.success_callback=["sentry"]` |
| Posthog | `POSTHOG_API_KEY`,`POSTHOG_API_URL` | `litellm.success_callback=["posthog"]` |
| Slack | `SLACK_API_TOKEN`,`SLACK_API_SECRET`,`SLACK_API_CHANNEL` | `litellm.success_callback=["slack"]` |
+| Traceloop | `TRACELOOP_API_KEY` | `litellm.success_callback=["traceloop"]` |
| Helicone | `HELICONE_API_TOKEN` | `litellm.success_callback=["helicone"]` |
diff --git a/docs/my-website/docs/observability/traceloop_integration.md b/docs/my-website/docs/observability/traceloop_integration.md
new file mode 100644
index 000000000..2463f40fe
--- /dev/null
+++ b/docs/my-website/docs/observability/traceloop_integration.md
@@ -0,0 +1,34 @@
+# Traceloop Tutorial
+
+[Traceloop](https://traceloop.com) is a platform for monitoring and debugging the quality of your LLM outputs.
+It gives you a way to track the performance of your LLM application, roll out changes with confidence, and debug issues in production.
+It is based on [OpenTelemetry](https://opentelemetry.io), so it can provide full visibility into your LLM requests, as well as vector DB usage and other infrastructure in your stack.
+
+![Traceloop dashboard](../../img/traceloop_dash.png)
+
+## Getting Started
+
+First, sign up to get an API key on the [Traceloop dashboard](https://app.traceloop.com).
+While Traceloop is still in beta, [ping them](mailto:nir@traceloop.com) and mention you're using LiteLLM to get your early access code.
+
+Then, install the Traceloop SDK:
+
+```bash
+pip install traceloop
+```
+
+Now, with just one line of code, you can instantly log your LLM responses:
+
+```python
+import litellm
+
+litellm.success_callback = ["traceloop"]
+```
+
+When running your app, make sure to set the `TRACELOOP_API_KEY` environment variable to your API key.
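+
+For example, a minimal end-to-end setup could look like this (the model and prompt below are placeholders):
+
+```python
+import os
+import litellm
+
+os.environ["TRACELOOP_API_KEY"] = "your-api-key"  # or export it in your shell
+litellm.success_callback = ["traceloop"]
+
+response = litellm.completion(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "What is LiteLLM?"}],
+)
+```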
+
+To get better visualizations of how your code behaves, you may want to annotate specific parts of your LLM chain. See the [Traceloop docs on decorators](https://traceloop.com/docs/python-sdk/decorators) for more information.
+
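+For illustration, here's a sketch of an annotated step (the `workflow` decorator and its import path are assumptions based on the linked docs, not part of LiteLLM; check the Traceloop docs for the exact API):
+
+```python
+import litellm
+from traceloop.sdk.decorators import workflow  # assumed import path; see Traceloop docs
+
+
+@workflow(name="joke_generator")  # groups the calls inside into one named trace
+def generate_joke():
+    return litellm.completion(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Tell me a joke"}],
+    )
+```
+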
+Here's an example PR of such an integration: https://github.com/Codium-ai/pr-agent/pull/244
+
+## Support
+
+For any questions or issues with the integration, you can reach out to the Traceloop team on [Slack](https://join.slack.com/t/traceloopcommunity/shared_invite/zt-1plpfpm6r-zOHKI028VkpcWdobX65C~g) or via [email](mailto:dev@traceloop.com).
diff --git a/docs/my-website/img/traceloop_dash.png b/docs/my-website/img/traceloop_dash.png
new file mode 100644
index 000000000..4b0ebd12a
Binary files /dev/null and b/docs/my-website/img/traceloop_dash.png differ
diff --git a/litellm/integrations/traceloop.py b/litellm/integrations/traceloop.py
new file mode 100644
index 000000000..af0e2495c
--- /dev/null
+++ b/litellm/integrations/traceloop.py
@@ -0,0 +1,7 @@
+import sys
+
+from traceloop.tracing import Tracer
+
+
+class TraceloopLogger:
+    """Initializes Traceloop tracing when the "traceloop" callback is enabled."""
+
+    def __init__(self):
+        # Use the running script's name as the app name shown in the Traceloop dashboard
+        self.tracer = Tracer.init(app_name=sys.argv[0])
diff --git a/litellm/utils.py b/litellm/utils.py
index fb136a330..f3170d176 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -9,6 +9,7 @@ import uuid
encoding = tiktoken.get_encoding("cl100k_base")
import importlib.metadata
+from .integrations.traceloop import TraceloopLogger
from .integrations.helicone import HeliconeLogger
from .integrations.aispend import AISpendLogger
from .integrations.berrispend import BerriSpendLogger
@@ -729,7 +730,7 @@ def load_test_model(
def set_callbacks(callback_list):
-    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger
+    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger
    try:
        for callback in callback_list:
            print_verbose(f"callback: {callback}")
@@ -782,6 +783,8 @@ def set_callbacks(callback_list):
            )
            alerts_channel = os.environ["SLACK_API_CHANNEL"]
            print_verbose(f"Initialized Slack App: {slack_app}")
+        elif callback == "traceloop":
+            traceloopLogger = TraceloopLogger()
        elif callback == "helicone":
            heliconeLogger = HeliconeLogger()
        elif callback == "llmonitor":
diff --git a/mkdocs.yml b/mkdocs.yml
index b3c88c741..c9a6b255d 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -17,6 +17,7 @@ nav:
- Quick Start: advanced.md
- Output Integrations: client_integrations.md
- LLMonitor Tutorial: llmonitor_integration.md
+ - Traceloop Tutorial: traceloop_integration.md
- Helicone Tutorial: helicone_integration.md
- Supabase Tutorial: supabase_integration.md
- BerriSpend Tutorial: berrispend_integration.md
diff --git a/proxy-server/readme.md b/proxy-server/readme.md
index 9c3c13934..0bd45a3b5 100644
--- a/proxy-server/readme.md
+++ b/proxy-server/readme.md
@@ -33,7 +33,7 @@
- Call all models using the OpenAI format - `completion(model, messages)`
- Text responses will always be available at `['choices'][0]['message']['content']`
- **Error Handling** Using Model Fallbacks (if `GPT-4` fails, try `llama2`)
-- **Logging** - Log Requests, Responses and Errors to `Supabase`, `Posthog`, `Mixpanel`, `Sentry`, `LLMonitor`, `Helicone` (Any of the supported providers here: https://litellm.readthedocs.io/en/latest/advanced/
+- **Logging** - Log Requests, Responses and Errors to `Supabase`, `Posthog`, `Mixpanel`, `Sentry`, `LLMonitor`, `Traceloop`, `Helicone` (Any of the supported providers here: https://litellm.readthedocs.io/en/latest/advanced/)
**Example: Logs sent to Supabase**