mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
fix(main.py): fix caching for router

parent 4f42beb9d9
commit c6ce3fedcd

3 changed files with 5 additions and 3 deletions
@@ -2,7 +2,7 @@ import os, types, traceback
 import json
 from enum import Enum
 import requests
-import time
+import time, httpx
 from typing import Callable, Optional
 from litellm.utils import ModelResponse, Choices, Message
 import litellm
@@ -11,6 +11,8 @@ class AI21Error(Exception):
     def __init__(self, status_code, message):
         self.status_code = status_code
         self.message = message
+        self.request = httpx.Request(method="POST", url="https://api.replicate.com/v1/deployments")
+        self.response = httpx.Response(status_code=status_code, request=self.request)
         super().__init__(
             self.message
         ) # Call the base class constructor with the parameters it needs
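Attaching a synthetic httpx.Request/httpx.Response pair to the exception lets downstream handlers branch on HTTP status uniformly, whatever provider raised the error. A minimal sketch of that consumption pattern (the try/except handler is illustrative, not part of this commit; the placeholder URL is copied from the hunk above):

# Sketch: consuming an exception that carries httpx request/response
# objects, as AI21Error does after this commit.
import httpx

class AI21Error(Exception):
    def __init__(self, status_code, message):
        self.status_code = status_code
        self.message = message
        # Synthetic request/response, mirroring the hunk above.
        self.request = httpx.Request(method="POST", url="https://api.replicate.com/v1/deployments")
        self.response = httpx.Response(status_code=status_code, request=self.request)
        super().__init__(self.message)

try:
    raise AI21Error(status_code=429, message="rate limited")
except AI21Error as e:
    # Generic handlers can branch on the attached HTTP status.
    if e.response.status_code == 429:
        print(f"retry later: {e.message}")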
@@ -147,7 +147,7 @@ async def acompletion(*args, **kwargs):
     else:
         # Await normally
         init_response = completion(*args, **kwargs)
-        if isinstance(init_response, dict):
+        if isinstance(init_response, dict) or isinstance(init_response, ModelResponse):
             response = init_response
         else:
             response = await init_response
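The widened isinstance check is the caching fix: on a cache hit, completion hands back a fully materialized ModelResponse synchronously, while a cache miss yields a coroutine that still has to be awaited, and awaiting a ModelResponse would raise a TypeError. A self-contained sketch of the pattern (the names here are stand-ins, not litellm's code):

# Sketch: a sync call that returns either a ready result (cache hit) or
# a coroutine (cache miss), with the isinstance check deciding whether
# to await.
import asyncio

async def fetch(use_cache: bool):
    async def _network_call():
        return {"source": "network"}

    def completion():
        # Cache hit: return the result object directly.
        # Cache miss: return a coroutine to be awaited.
        return {"source": "cache"} if use_cache else _network_call()

    init_response = completion()
    if isinstance(init_response, dict):  # already materialized (cache hit)
        response = init_response
    else:                                # coroutine: await the real call
        response = await init_response
    return response

print(asyncio.run(fetch(use_cache=True)))   # {'source': 'cache'}
print(asyncio.run(fetch(use_cache=False)))  # {'source': 'network'}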
@@ -257,7 +257,7 @@ def test_acompletion_on_router():
         traceback.print_exc()
         pytest.fail(f"Error occurred: {e}")
 
-# test_acompletion_on_router()
+test_acompletion_on_router()
 
 def test_function_calling_on_router():
     try:
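Uncommenting the module-level call means the test now also runs when the file is executed directly with python, not only under pytest collection. A stubbed sketch of that pattern (the acompletion stub and its arguments are placeholders, not the repo's actual test body):

# Sketch: a pytest-style test that is also invoked at module level.
# `acompletion` is stubbed so the example runs without network access.
import asyncio, traceback
import pytest

async def acompletion(**kwargs):  # stand-in for litellm.acompletion
    return {"choices": [{"message": {"content": "hi"}}]}

def test_acompletion_on_router():
    try:
        response = asyncio.run(
            acompletion(model="gpt-3.5-turbo",
                        messages=[{"role": "user", "content": "hello"}])
        )
        assert response["choices"]
    except Exception as e:
        traceback.print_exc()
        pytest.fail(f"Error occurred: {e}")

# Runs on plain `python file.py` as well as under pytest.
test_acompletion_on_router()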