Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix(router.py): enabling retrying with expo backoff (without tenacity) for router
This commit is contained in:
parent: 98c45f1b4e
commit: 59eaeba92a

9 changed files with 147 additions and 84 deletions
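
The commit title describes adding retry with exponential backoff to the router without depending on tenacity. As a rough sketch of that general pattern only (this is not litellm's actual router code; the helper name and parameters below are illustrative):

    import random
    import time

    def retry_with_expo_backoff(fn, max_retries=3, base_delay=1.0, max_delay=30.0):
        # Illustrative helper, not litellm's API: retry `fn` with exponential
        # backoff plus jitter, using only the standard library (no tenacity).
        for attempt in range(max_retries + 1):
            try:
                return fn()
            except Exception:
                if attempt == max_retries:
                    raise  # retries exhausted, surface the last error
                # back off 1s, 2s, 4s, ... capped at max_delay, with small jitter
                delay = min(base_delay * (2 ** attempt), max_delay)
                time.sleep(delay + random.uniform(0, 0.1 * delay))

Inlining a loop like this keeps the router free of the tenacity dependency while preserving the usual capped, jittered backoff behavior.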
@@ -236,9 +236,6 @@ def mock_completion(model: str, messages: List, stream: Optional[bool] = False,
        raise Exception("Mock completion response failed")


@client
@timeout(  # type: ignore
    600
)  ## set timeouts, in case calls hang (e.g. Azure) - default is 600s, override with `force_timeout`
def completion(
    model: str,
    # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create
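
The comment on the timeout decorator above says the 600s default can be overridden per call with `force_timeout`. Assuming `completion` accepts it as a keyword argument, as that comment indicates, a usage sketch with arbitrary example values:

    import litellm

    response = litellm.completion(
        model="gpt-3.5-turbo",  # example model name
        messages=[{"role": "user", "content": "Hello"}],
        force_timeout=30,  # seconds; overrides the 600s default noted in the decorator comment
    )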