test(test_proxy.py): adding testing for proxy server
parent ebc34ddeb6
commit 22937b3b16

2 changed files with 30 additions and 1 deletion
@@ -86,7 +86,7 @@ def is_port_in_use(port):
 @click.option('--host', default='0.0.0.0', help='Host for the server to listen on.')
 @click.option('--port', default=8000, help='Port to bind the server to.')
 @click.option('--api_base', default=None, help='API base URL.')
-@click.option('--model', default=None, help='The model name to pass to litellm expects')
+@click.option('--model', '-m', default=None, help='The model name to pass to litellm expects')
 @click.option('--alias', default=None, help='The alias for the model - use this to give a litellm model name (e.g. "huggingface/codellama/CodeLlama-7b-Instruct-hf") a more user-friendly name ("codellama")')
 @click.option('--add_key', default=None, help='The model name to pass to litellm expects')
 @click.option('--headers', default=None, help='headers for the API call')
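The hunk context above names is_port_in_use(port); its body is not part of this diff. For orientation, a sketch of the conventional socket-based check such a helper usually performs (an assumption, not the file's actual code):

import socket

def is_port_in_use(port: int) -> bool:
    # Try to connect to the port on localhost; a successful connection
    # (connect_ex returning 0) means something is already listening there.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("127.0.0.1", port)) == 0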
@@ -158,6 +158,7 @@ def run_server(host, port, api_base, model, alias, add_key, headers, save, debug
         print("\033[1;32mDone successfully\033[0m")
         return
     if model and "ollama" in model:
+        print(f"ollama called")
         run_ollama_serve()
     if cost == True:
         print_cost_logs()
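The only functional change to the proxy CLI here is the new '-m' short alias for '--model' (plus the added debug print in the ollama branch). A minimal, self-contained sketch of that click pattern, exercised with the CliRunner that the new test file imports; the demo command below is hypothetical and stands in for the real run_server:

import click
from click.testing import CliRunner

@click.command()
@click.option('--model', '-m', default=None, help='The model name to pass to litellm expects')
def demo(model):
    # Echo back the parsed value so both spellings can be compared.
    click.echo(f"model={model}")

if __name__ == "__main__":
    runner = CliRunner()
    # After this change, the long and short forms parse identically.
    print(runner.invoke(demo, ["--model", "gpt-3.5-turbo"]).output)  # model=gpt-3.5-turbo
    print(runner.invoke(demo, ["-m", "gpt-3.5-turbo"]).output)       # model=gpt-3.5-turbo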
litellm/tests/test_proxy.py (new file, 28 lines)

@@ -0,0 +1,28 @@
+#### What this tests ####
+# This tests the OpenAI-proxy server
+
+import sys, os
+import traceback
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+from dotenv import load_dotenv
+
+load_dotenv()
+import unittest
+from unittest.mock import patch
+from click.testing import CliRunner
+import pytest
+import litellm
+from litellm.proxy.llm import litellm_completion
+
+def test_azure_call():
+    try:
+        data = {
+            "model": "azure/chatgpt-v-2",
+            "messages": [{"role": "user", "content": "Hey!"}]
+        }
+        result = litellm_completion(data=data, user_api_base=os.getenv("AZURE_API_BASE"), type="chat_completion", user_temperature=None, user_max_tokens=None, user_model=None, user_headers=None, user_debug=False)
+        return result
+    except Exception as e:
+        pytest.fail(f"An error occurred: {e}")
+
+test_azure_call()
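As written, test_azure_call() exercises a live Azure deployment and expects AZURE_API_BASE (and matching credentials) in the environment; the relative sys.path.insert suggests it is meant to be run from the tests directory, e.g. with pytest test_proxy.py. The file already imports unittest.mock.patch, so a credential-free variant is possible. A rough sketch, assuming litellm_completion ultimately dispatches to litellm.completion (if it imports the function directly instead, the patch target would need to change); the fake response shape is illustrative, not litellm's exact schema:

import os
from unittest.mock import patch

from litellm.proxy.llm import litellm_completion

def test_azure_call_mocked():
    # Assumed fake payload; real litellm responses are richer objects.
    fake_response = {"choices": [{"message": {"role": "assistant", "content": "Hi!"}}]}
    # Assumption: litellm_completion calls litellm.completion under the hood.
    with patch("litellm.completion", return_value=fake_response) as mock_completion:
        data = {
            "model": "azure/chatgpt-v-2",
            "messages": [{"role": "user", "content": "Hey!"}],
        }
        litellm_completion(data=data, user_api_base=os.getenv("AZURE_API_BASE"),
                           type="chat_completion", user_temperature=None,
                           user_max_tokens=None, user_model=None,
                           user_headers=None, user_debug=False)
        # The proxy helper should have forwarded the request exactly once.
        mock_completion.assert_called_once()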