diff --git a/docs/my-website/docs/observability/langsmith_integration.md b/docs/my-website/docs/observability/langsmith_integration.md
new file mode 100644
index 000000000..b9d726fdd
--- /dev/null
+++ b/docs/my-website/docs/observability/langsmith_integration.md
@@ -0,0 +1,51 @@
+import Image from '@theme/IdealImage';
+
+# Langsmith - Logging LLM Input/Output
+An all-in-one developer platform for every step of the application lifecycle
+https://smith.langchain.com/
+
+<Image img={require('../../img/langsmith.png')} />
+
+:::info
+We want to learn how we can make the callbacks better! Meet the LiteLLM [founders](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version) or
+join our [discord](https://discord.gg/wuPM9dRgDw)
+:::
+
+## Pre-Requisites
+```shell
+pip install litellm
+```
+
+## Quick Start
+Use just 2 lines of code, to instantly log your responses **across all providers** with Langsmith
+
+
+```python
+litellm.success_callback = ["langsmith"]
+```
+```python
+import litellm
+import os
+
+os.environ["LANGSMITH_API_KEY"] = ""
+# LLM API Keys
+os.environ['OPENAI_API_KEY']=""
+
+# set langsmith as a callback, litellm will send the data to langsmith
+litellm.success_callback = ["langsmith"]
+
+# openai call
+response = litellm.completion(
+  model="gpt-3.5-turbo",
+  messages=[
+    {"role": "user", "content": "Hi 👋 - i'm openai"}
+  ]
+)
+```
+
+## Support & Talk to Founders
+
+- [Schedule Demo 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version)
+- [Community Discord 💭](https://discord.gg/wuPM9dRgDw)
+- Our numbers 📞 +1 (770) 8783-106 / ‭+1 (412) 618-6238‬
+- Our emails ✉️ ishaan@berri.ai / krrish@berri.ai
\ No newline at end of file
diff --git a/docs/my-website/docs/observability/wandb_integration.md b/docs/my-website/docs/observability/wandb_integration.md
index d760e44bd..dfa7b46dd 100644
--- a/docs/my-website/docs/observability/wandb_integration.md
+++ b/docs/my-website/docs/observability/wandb_integration.md
@@ -17,15 +17,13 @@ pip install wandb litellm
 ```
 
 ## Quick Start
-Use just 2 lines of code, to instantly log your responses **across all providers** with Langfuse
+Use just 2 lines of code, to instantly log your responses **across all providers** with Weights & Biases
-
-Get your Langfuse API Keys from https://cloud.langfuse.com/
 
 ```python
 litellm.success_callback = ["wandb"]
 ```
 ```python
-# pip install langfuse
+# pip install wandb
 import litellm
 import os
 
@@ -33,7 +31,7 @@ os.environ["WANDB_API_KEY"] = ""
 # LLM API Keys
 os.environ['OPENAI_API_KEY']=""
 
-# set langfuse as a callback, litellm will send the data to langfuse
+# set wandb as a callback, litellm will send the data to Weights & Biases
 litellm.success_callback = ["wandb"]
 
 # openai call
diff --git a/docs/my-website/img/langsmith.png b/docs/my-website/img/langsmith.png
new file mode 100644
index 000000000..49d572e9e
Binary files /dev/null and b/docs/my-website/img/langsmith.png differ
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index a62e14236..c405d68ee 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -118,6 +118,7 @@ const sidebars = {
         "observability/promptlayer_integration",
         "observability/wandb_integration",
         "observability/langfuse_integration",
+        "observability/langsmith_integration",
         "observability/traceloop_integration",
         "observability/llmonitor_integration",
         "observability/helicone_integration",