From 33d823a18d2fb66a24923e03b39230a087af140b Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Wed, 15 Nov 2023 13:20:02 -0800 Subject: [PATCH] docs(routing.md): add contributor shout-out; bump default request_timeout 600 -> 6000 --- docs/my-website/docs/routing.md | 3 +++ litellm/__init__.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/my-website/docs/routing.md b/docs/my-website/docs/routing.md index 2f19b4bdb..b3a3298de 100644 --- a/docs/my-website/docs/routing.md +++ b/docs/my-website/docs/routing.md @@ -8,6 +8,9 @@ Use this if you're trying to load-balance across multiple deployments (e.g. Azur In production, [Router connects to a Redis Cache](#redis-queue) to track usage across multiple deployments. + +(s/o [@paulpierre](https://www.linkedin.com/in/paulpierre/) for his contribution to this implementation) + ### Quick Start ```python diff --git a/litellm/__init__.py b/litellm/__init__.py index 0dd7e2bd3..fe5d4803d 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -19,7 +19,7 @@ telemetry = True max_tokens = 256 # OpenAI Defaults drop_params = False retry = True -request_timeout: float = 600 +request_timeout: float = 6000 api_key: Optional[str] = None openai_key: Optional[str] = None azure_key: Optional[str] = None