diff --git a/docs/my-website/docs/simple_proxy.md b/docs/my-website/docs/simple_proxy.md
index b8501c8df..bf8c525eb 100644
--- a/docs/my-website/docs/simple_proxy.md
+++ b/docs/my-website/docs/simple_proxy.md
@@ -121,6 +121,72 @@ $ docker run -e PORT=8000 -e COHERE_API_KEY= -p 8000:8000 ghcr.io/
+## Tutorials (Chat-UI, NeMo-Guardrails, etc.)
+
+Here's the `docker-compose.yml` for running LiteLLM Server with Mckay Wrigley's Chat-UI:
+```yaml
+version: '3'
+services:
+  container1:
+    image: ghcr.io/berriai/litellm:latest
+    ports:
+      - '8000:8000'
+    environment:
+      - PORT=8000
+      - OPENAI_API_KEY=<your-openai-api-key>
+
+  container2:
+    image: ghcr.io/mckaywrigley/chatbot-ui:main
+    ports:
+      - '3000:3000'
+    environment:
+      - OPENAI_API_KEY=my-fake-key
+      - OPENAI_API_HOST=http://container1:8000
+```
+
+Run this via:
+```shell
+docker-compose up
+```
+
+#### Adding NeMo-Guardrails to Bedrock
+
+1. Start the server
+```shell
+docker run -e PORT=8000 -e AWS_ACCESS_KEY_ID= -e AWS_SECRET_ACCESS_KEY= -p 8000:8000 ghcr.io/berriai/litellm:latest
+```
+
+2. Install dependencies
+```shell
+pip install nemoguardrails langchain
+```
+
+3. Run the script
+```python
+from langchain.chat_models import ChatOpenAI
+from nemoguardrails import LLMRails, RailsConfig
+
+# Point LangChain's OpenAI client at the LiteLLM server started in step 1
+llm = ChatOpenAI(model_name="bedrock/anthropic.claude-v2", openai_api_base="http://0.0.0.0:8000", openai_api_key="my-fake-key")
+
+# Wrap the LLM with NeMo Guardrails
+config = RailsConfig.from_path("./config.yml")
+app = LLMRails(config, llm=llm)
+
+new_message = app.generate(messages=[{
+    "role": "user",
+    "content": "Hello! What can you do for me?"
+}])
+print(new_message)
+```
+
 ## Endpoints:
 - `/chat/completions` - chat completions endpoint to call 100+ LLMs
 - `/embeddings` - embedding endpoint for Azure, OpenAI, Huggingface endpoints
@@ -193,38 +259,6 @@ Docker command:
 docker run -e LANGFUSE_PUBLIC_KEY= -e LANGFUSE_SECRET_KEY= -e LANGFUSE_HOST= -e PORT=8000 -p 8000:8000 ghcr.io/berriai/litellm:latest
 ```
 
-## Tutorials
-
-Here's the `docker-compose.yml` for running LiteLLM Server with Mckay Wrigley's Chat-UI:
-```yaml
-version: '3'
-services:
-  container1:
-    image: ghcr.io/berriai/litellm:latest
-    ports:
-      - '8000:8000'
-    environment:
-      - PORT=8000
-      - OPENAI_API_KEY=<your-openai-api-key>
-
-  container2:
-    image: ghcr.io/mckaywrigley/chatbot-ui:main
-    ports:
-      - '3000:3000'
-    environment:
-      - OPENAI_API_KEY=my-fake-key
-      - OPENAI_API_HOST=http://container1:8000
-```
-
-Run this via:
-```shell
-docker-compose up
-```
-
 ## Local Usage
 
 ```shell
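
The NeMo-Guardrails tutorial added above loads a `./config.yml` that isn't shown in this diff. As a reference, here's a minimal sketch of what that file might contain, assuming the standard NeMo Guardrails config schema; the instruction text is a hypothetical example, not taken from the diff:

```yaml
# Minimal ./config.yml sketch for the NeMo-Guardrails tutorial above.
# Assumes the standard NeMo Guardrails schema; the instruction content
# below is a hypothetical example.
instructions:
  - type: general
    content: |
      You are a helpful assistant. Answer the user's questions
      concisely and refuse requests for harmful content.
```

A `models:` block, normally required in `config.yml`, should be optional here since the script passes `llm=llm` to `LLMRails` directly.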