(fix) proxy - cloudflare + Azure bug [non-streaming]

ishaan-jaff 2024-01-04 10:24:51 +05:30
parent b103ab1f0b
commit 6d21ee3a2f
3 changed files with 92 additions and 32 deletions

View file

@@ -1326,6 +1326,7 @@ class Router:
                        local_only=True,
                    )  # cache for 1 hr
                    cache_key = f"{model_id}_client"
                    _client = openai.AzureOpenAI(  # type: ignore
                        api_key=api_key,
                        base_url=api_base,
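In plain terms, the hunk above builds the Azure client with base_url=api_base, so a Cloudflare AI Gateway address is used as the full request URL rather than being treated as a plain Azure resource endpoint. A minimal standalone sketch of that difference (not the router code itself; the endpoint, gateway URL, and key handling below are placeholders):

import os
import openai

# Plain Azure OpenAI: the resource endpoint goes in azure_endpoint
direct_client = openai.AzureOpenAI(
    api_key=os.environ["AZURE_API_KEY"],
    azure_endpoint="https://my-resource.openai.azure.com",
    api_version="2023-07-01-preview",
)

# Azure behind a Cloudflare AI Gateway: the full gateway URL goes in base_url,
# since the gateway path already encodes the account, gateway, and deployment
gateway_client = openai.AzureOpenAI(
    api_key=os.environ["AZURE_API_KEY"],
    base_url="https://gateway.ai.cloudflare.com/v1/<account_id>/<gateway>/azure-openai/<deployment>",
    api_version="2023-07-01-preview",
)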

View file

@@ -0,0 +1,7 @@
model_list:
  - model_name: azure-cloudflare
    litellm_params:
      model: azure/chatgpt-v-2
      api_base: https://gateway.ai.cloudflare.com/v1/0399b10e77ac6668c80404a5ff49eb37/litellm-test/azure-openai/openai-gpt-4-test-v-1
      api_key: os.environ/AZURE_API_KEY
      api_version: 2023-07-01-preview
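Once the proxy is started with this config, the deployment can be called through the proxy by its model_name. A usage sketch, assuming the proxy is listening on http://0.0.0.0:8000 (the address used by the tests in this repo) and that no auth key is required:

import openai

# point the OpenAI SDK at the local LiteLLM proxy
client = openai.OpenAI(api_key="anything", base_url="http://0.0.0.0:8000")

response = client.chat.completions.create(
    model="azure-cloudflare",  # model_name from the config above
    messages=[{"role": "user", "content": "Write a short poem about clouds"}],
    max_tokens=10,
)
print(response.choices[0].message.content)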

View file

@@ -1,38 +1,90 @@
# #### What this tests ####
# # This tests using caching w/ litellm which requires SSL=True
#### What this tests ####
# This tests using caching w/ litellm which requires SSL=True
import sys, os
import traceback
from dotenv import load_dotenv
# import sys, os
# import time
# import traceback
# from dotenv import load_dotenv
load_dotenv()
import os, io
# load_dotenv()
# import os
# this file is to test litellm/proxy
# sys.path.insert(
# 0, os.path.abspath("../..")
# ) # Adds the parent directory to the system path
# import pytest
# import litellm
# from litellm import embedding, completion
# from litellm.caching import Cache
sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest, logging
import litellm
from litellm import embedding, completion, completion_cost, Timeout
from litellm import RateLimitError
# messages = [{"role": "user", "content": f"who is ishaan {time.time()}"}]
# Configure logging
logging.basicConfig(
    level=logging.DEBUG,  # Set the desired logging level
    format="%(asctime)s - %(levelname)s - %(message)s",
)
# @pytest.mark.skip(reason="local proxy test")
# def test_caching_v2(): # test in memory cache
# try:
# response1 = completion(model="openai/gpt-3.5-turbo", messages=messages, api_base="http://0.0.0.0:8000")
# response2 = completion(model="openai/gpt-3.5-turbo", messages=messages, api_base="http://0.0.0.0:8000")
# print(f"response1: {response1}")
# print(f"response2: {response2}")
# litellm.cache = None # disable cache
# if response2['choices'][0]['message']['content'] != response1['choices'][0]['message']['content']:
# print(f"response1: {response1}")
# print(f"response2: {response2}")
# raise Exception()
# except Exception as e:
# print(f"error occurred: {traceback.format_exc()}")
# pytest.fail(f"Error occurred: {e}")
# test /chat/completion request to the proxy
from fastapi.testclient import TestClient
from fastapi import FastAPI
from litellm.proxy.proxy_server import (
    router,
    save_worker_config,
    initialize,
)  # Replace with the actual module where your FastAPI router is defined
# test_caching_v2()
# Your bearer token
token = ""
headers = {"Authorization": f"Bearer {token}"}

@pytest.fixture(scope="function")
def client_no_auth():
    # Assuming litellm.proxy.proxy_server is an object
    from litellm.proxy.proxy_server import cleanup_router_config_variables

    cleanup_router_config_variables()
    filepath = os.path.dirname(os.path.abspath(__file__))
    config_fp = f"{filepath}/test_configs/test_cloudflare_azure_with_cache_config.yaml"
    # initialize can get run in parallel; it sets specific variables for the FastAPI app,
    # and since it runs in parallel, different tests can end up using the wrong variables
    initialize(config=config_fp, debug=True)
    app = FastAPI()
    app.include_router(router)  # Include your router in the test app
    return TestClient(app)


def generate_random_word(length=4):
    import string, random

    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for _ in range(length))


def test_chat_completion(client_no_auth):
    global headers
    try:
        user_message = f"Write a poem about {generate_random_word()}"
        messages = [{"content": user_message, "role": "user"}]
        # Your test data
        test_data = {
            "model": "azure-cloudflare",
            "messages": messages,
            "max_tokens": 10,
        }

        print("testing proxy server with chat completions")
        response = client_no_auth.post("/v1/chat/completions", json=test_data)
        print(f"response - {response.text}")
        assert response.status_code == 200

        response = response.json()
        print(response)

        content = response["choices"][0]["message"]["content"]
        print("\n content", content)

        assert len(content) > 1
    except Exception as e:
        pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")
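To exercise just this test locally, a pytest invocation along these lines should work (a sketch; the test file path is a placeholder for wherever this file lives in the repo, and AZURE_API_KEY must be set in the environment):

import pytest

# -s shows the print output above; -x stops on the first failure
pytest.main(["-x", "-s", "path/to/this_test_file.py::test_chat_completion"])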