forked from phoenix/litellm-mirror
fix(proxy_server.py): If master key is set, only master key can be used to generate new keys
parent 10fe16c965
commit 604ad41eac
2 changed files with 15 additions and 5 deletions
proxy_server.py

@@ -164,8 +164,14 @@ async def user_api_key_auth(request: Request):
         return
     try:
         api_key = await oauth2_scheme(request=request)
+        route = request.url.path
+
         if api_key == master_key:
             return
+
+        if route == "/key/generate" and api_key != master_key:
+            raise Exception(f"If master key is set, only master key can be used to generate new keys")
+
         if api_key in config_cache:
             llm_model_list = config_cache[api_key].get("model_list", [])
             return
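A quick sketch of what this gate means for a client, assuming a proxy running locally on port 8000; the placeholder keys (sk-master, sk-other) and the request payload shape are assumptions for illustration, not values from this diff:

```python
# Hypothetical client-side check of the new /key/generate gate.
# BASE, sk-master and sk-other are placeholders, not values from this commit.
import requests

BASE = "http://localhost:8000"  # assumed local proxy address

# With the master key, key generation passes user_api_key_auth.
ok = requests.post(
    f"{BASE}/key/generate",
    headers={"Authorization": "Bearer sk-master"},
    json={"models": ["gpt-3.5-turbo"]},  # payload shape assumed
)
print(ok.status_code)

# Any other key now trips the new raise in user_api_key_auth, so the
# request should be rejected rather than minting a key.
bad = requests.post(
    f"{BASE}/key/generate",
    headers={"Authorization": "Bearer sk-other"},
    json={"models": ["gpt-3.5-turbo"]},
)
print(bad.status_code)  # expected: an auth error, not 200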
@@ -593,6 +599,11 @@ async def generate_key_fn(request: Request):
             detail={"error": "models param must be a list"},
         )
+
+@router.get("/test")
+async def test_endpoint(request: Request):
+    return {"route": request.url.path}
+
 #### EXPERIMENTAL QUEUING ####
 @router.post("/queue/request", dependencies=[Depends(user_api_key_auth)])
 async def async_queue_request(request: Request):
     global celery_fn, llm_model_list
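The new /test route simply echoes the request path, which makes it a handy liveness probe; a minimal sketch, assuming the same local proxy address as above:

```python
# Minimal sketch: the /test route added above returns {"route": request.url.path}.
import requests

resp = requests.get("http://localhost:8000/test")  # address assumed
print(resp.json())  # expected: {"route": "/test"}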
test_completion.py

@@ -164,9 +164,7 @@ def test_completion_gpt4_vision():
 
 def test_completion_perplexity_api():
     try:
-        litellm.set_verbose=True
-        litellm.num_retries = 0
-        litellm.drop_params = True
+        # litellm.set_verbose=True
         messages=[{
             "role": "system",
             "content": "You're a good bot"
@@ -180,7 +178,7 @@ def test_completion_perplexity_api():
         response = completion(
             model="mistral-7b-instruct",
             messages=messages,
-            api_base="https://api.perplexity.ai", stop="Hello")
+            api_base="https://api.perplexity.ai")
         print(response)
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
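For reference, the standalone call the updated test now exercises, with stop="Hello" dropped; a sketch assuming a Perplexity API key is available to litellm via the environment:

```python
# Sketch of the call test_completion_perplexity_api now makes; assumes the
# Perplexity API key is configured in the environment.
from litellm import completion

messages = [{"role": "system", "content": "You're a good bot"}]
response = completion(
    model="mistral-7b-instruct",
    messages=messages,
    api_base="https://api.perplexity.ai",
)
print(response)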
@@ -446,7 +444,7 @@ def test_completion_openai():
         pass
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_openai()
+# test_completion_openai()
 
 def test_completion_text_openai():
     try:
@@ -832,6 +830,7 @@ def test_completion_replicate_llama2_stream():
         # assert len(chunk.choices[0].delta["content"]) > 2
         # print(chunk)
         assert len(complete_response) > 5
+        print(f"complete_response: {complete_response}")
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 test_completion_replicate_llama2_stream()
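The new print makes the accumulated stream visible on a test run. The accumulation pattern being asserted on looks roughly like this; the model id is hypothetical and the delta access mirrors the commented-out assert in the diff above, so treat both as assumptions:

```python
# Rough sketch of the streaming accumulation this test asserts on.
# The model id is hypothetical; the delta["content"] access pattern is
# taken from the commented-out assert above and assumed to hold.
from litellm import completion

complete_response = ""
response = completion(
    model="replicate/meta/llama-2-70b-chat",  # hypothetical model id
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)
for chunk in response:
    complete_response += chunk.choices[0].delta.get("content") or ""

assert len(complete_response) > 5
print(f"complete_response: {complete_response}")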