{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# Using NeMo Guardrails with LiteLLM Server\n",
    "\n",
    "### Prerequisites\n",
    "\n",
    "Spin up the LiteLLM Server on port 8000:\n",
    "`docker run -e PORT=8000 -e AWS_ACCESS_KEY_ID= -e AWS_SECRET_ACCESS_KEY= -p 8000:8000 ghcr.io/berriai/litellm:latest`\n",
    "\n",
    "[Call Bedrock, TogetherAI, Huggingface, etc. on the server](https://docs.litellm.ai/docs/providers)\n",
    "\n",
    "Because LiteLLM exposes an OpenAI-compatible API, the `openai_api_key` passed below is a placeholder — the proxy handles the real provider credentials."
   ],
   "metadata": {
    "id": "eKXncoQbU_2j"
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# %pip (not bare `pip` / `!pip`) installs into the environment of the running kernel.\n",
    "%pip install -q nemoguardrails langchain"
   ],
   "metadata": {
    "id": "vOUwGSJ2Vsy3"
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "xXEJNxe7U0IN"
   },
   "outputs": [],
   "source": [
    "from langchain.chat_models import ChatOpenAI\n",
    "from nemoguardrails import LLMRails, RailsConfig\n",
    "\n",
    "# Point the OpenAI-compatible client at the local LiteLLM proxy; the proxy\n",
    "# routes the model name (here: Bedrock's Claude v2) to the real provider.\n",
    "llm = ChatOpenAI(\n",
    "    model_name=\"anthropic.claude-v2\",\n",
    "    openai_api_base=\"http://0.0.0.0:8000\",\n",
    "    openai_api_key=\"my-fake-key\",  # dummy value; the proxy holds the real credentials\n",
    ")\n",
    "\n",
    "# Wrap the LLM with NeMo Guardrails using the rails defined in ./config.yml.\n",
    "config = RailsConfig.from_path(\"./config.yml\")\n",
    "app = LLMRails(config, llm=llm)\n",
    "\n",
    "new_message = app.generate(messages=[{\n",
    "    \"role\": \"user\",\n",
    "    \"content\": \"Hello! What can you do for me?\"\n",
    "}])\n",
    "\n",
    "# Last expression -> rich display of the guarded response.\n",
    "new_message"
   ]
  }
 ]
}