forked from phoenix/litellm-mirror
docs(routing.md): add contributor s/o
This commit is contained in:
parent
8c104e9c6a
commit
33d823a18d
2 changed files with 4 additions and 1 deletion
|
@@ -8,6 +8,9 @@ Use this if you're trying to load-balance across multiple deployments (e.g. Azur
|
||||||
|
|
||||||
In production, [Router connects to a Redis Cache](#redis-queue) to track usage across multiple deployments.
|
In production, [Router connects to a Redis Cache](#redis-queue) to track usage across multiple deployments.
|
||||||
|
|
||||||
|
|
||||||
|
(s/o [@paulpierre](https://www.linkedin.com/in/paulpierre/) for his contribution to this implementation)
|
||||||
|
|
||||||
### Quick Start
|
### Quick Start
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
|
|
@@ -19,7 +19,7 @@ telemetry = True
|
||||||
max_tokens = 256 # OpenAI Defaults
|
max_tokens = 256 # OpenAI Defaults
|
||||||
drop_params = False
|
drop_params = False
|
||||||
retry = True
|
retry = True
|
||||||
request_timeout: float = 600
|
request_timeout: float = 6000
|
||||||
api_key: Optional[str] = None
|
api_key: Optional[str] = None
|
||||||
openai_key: Optional[str] = None
|
openai_key: Optional[str] = None
|
||||||
azure_key: Optional[str] = None
|
azure_key: Optional[str] = None
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue