Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)
(test) custom logger proxy

commit da96f5a5ac (parent f3131f1254)
3 changed files with 18 additions and 6 deletions
@@ -62,9 +62,11 @@ def test_embedding(client):
     my_custom_logger = litellm.callbacks[0]
     for callback in litellm.callbacks:
-        if "MyCustomHandler" in str(callback):
+        if "testCustomCallbackProxy" in str(callback):
             my_custom_logger = callback
             break

+    print("LiteLLM Callbacks", litellm.callbacks)
+    print("my_custom_logger", my_custom_logger)
     assert my_custom_logger.async_success_embedding == False

     test_data = {
@@ -73,6 +75,8 @@ def test_embedding(client):
     }
     response = client.post("/embeddings", json=test_data, headers=headers)
     print("made request", response.status_code, response.text)
+    print("LiteLLM Callbacks", litellm.callbacks)
+    print("my_custom_logger", my_custom_logger)
     assert my_custom_logger.async_success_embedding == True # checks if the status of async_success is True, only the async_log_success_event can set this to true
     assert my_custom_logger.async_embedding_kwargs["model"] == "azure-embedding-model" # checks if kwargs passed to async_log_success_event are correct

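The assertions above depend on async_log_success_event being the only code path that flips the async success flags. For orientation, here is a minimal sketch of what that override might look like; the attribute names come straight from the assertions, but the body (including the call_type check) is an assumption, not the repo's actual handler. The chat-completion hunks below follow the same pattern.

# Hypothetical sketch, not the repo's handler: attribute names are taken
# from the assertions above; the call_type check is an assumption.
from litellm.integrations.custom_logger import CustomLogger

class testCustomCallbackProxy(CustomLogger):
    def __init__(self):
        self.async_success_embedding: bool = False
        self.async_embedding_kwargs = None

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # Only this event sets the flag, which is why the test asserts
        # False before the request and True after it.
        if kwargs.get("call_type") == "embedding":  # assumed kwargs key
            self.async_success_embedding = True
            self.async_embedding_kwargs = kwargs    # test checks kwargs["model"]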
@@ -103,9 +107,11 @@ def test_chat_completion(client):
     # assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback
     my_custom_logger = litellm.callbacks[0]
     for callback in litellm.callbacks:
-        if "MyCustomHandler" in str(callback):
+        if "testCustomCallbackProxy" in str(callback):
             my_custom_logger = callback
             break
+    print("LiteLLM Callbacks", litellm.callbacks)
+    print("my_custom_logger", my_custom_logger)
     assert my_custom_logger.async_success == False

     test_data = {
@@ -122,6 +128,8 @@ def test_chat_completion(client):

     response = client.post("/chat/completions", json=test_data, headers=headers)
     print("made request", response.status_code, response.text)
+    print("LiteLLM Callbacks", litellm.callbacks)
+    print("my_custom_logger", my_custom_logger)
     assert my_custom_logger.async_success == True # checks if the status of async_success is True, only the async_log_success_event can set this to true
     assert my_custom_logger.async_completion_kwargs["model"] == "chatgpt-v-2" # checks if kwargs passed to async_log_success_event are correct
     print("\n\n Custom Logger Async Completion args", my_custom_logger.async_completion_kwargs)
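The test_data body is cut off by the hunk boundary and stays elided here; for context, a hypothetical payload of the shape the proxy's /chat/completions route accepts (only the model name "chatgpt-v-2" is confirmed by the assertion above; everything else is illustrative):

# Hypothetical request body; the real test_data is elided by the diff.
test_data = {
    "model": "chatgpt-v-2",  # the only field the assertions confirm
    "messages": [{"role": "user", "content": "hi"}],
    "max_tokens": 10,
}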
@@ -153,11 +161,15 @@ def test_chat_completion_stream(client):

     # assert len(litellm.callbacks) == 1 # assert litellm is initialized with 1 callback
     my_custom_logger = litellm.callbacks[0]

     for callback in litellm.callbacks:
-        if "MyCustomHandler" in str(callback):
+        if "testCustomCallbackProxy" in str(callback):
             my_custom_logger = callback
             break

+    print("LiteLLM Callbacks", litellm.callbacks)
+    print("my_custom_logger", my_custom_logger)
+
     assert my_custom_logger.streaming_response_obj == None # no streaming response obj is set pre call

     test_data = {
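All three tests above locate the handler by substring-matching str(callback), and the default str() of a plain object embeds its class name. That is why renaming the class (see the next file) forces the matched string to change from "MyCustomHandler" to "testCustomCallbackProxy". A standalone illustration, not repo code:

# str() on an instance includes the class name, e.g.
# "<__main__.testCustomCallbackProxy object at 0x7f...>".
class testCustomCallbackProxy:
    pass

cb = testCustomCallbackProxy()
print("testCustomCallbackProxy" in str(cb))  # True: the updated match
print("MyCustomHandler" in str(cb))          # False: the old match would now miss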
@@ -2,7 +2,7 @@ from litellm.integrations.custom_logger import CustomLogger
 import inspect
 import litellm

-class MyCustomHandler(CustomLogger):
+class testCustomCallbackProxy(CustomLogger):
     def __init__(self):
         self.success: bool = False # type: ignore
         self.failure: bool = False # type: ignore
@@ -107,4 +107,4 @@ class MyCustomHandler(CustomLogger):

         self.async_completion_kwargs_fail = kwargs

-my_custom_logger = MyCustomHandler()
+my_custom_logger = testCustomCallbackProxy()
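Note that only the class is renamed; the module-level instance keeps the name my_custom_logger. Anything that references the callback by its instance name (the litellm proxy config typically points at module.instance_name, e.g. custom_callbacks.my_custom_logger) keeps working untouched. A sketch of the rename-safe pattern, assuming that import style:

# The public surface is the instance name, so the class rename is
# invisible to consumers that do `from custom_callbacks import my_custom_logger`.
from litellm.integrations.custom_logger import CustomLogger

class testCustomCallbackProxy(CustomLogger):  # renamed class
    pass

my_custom_logger = testCustomCallbackProxy()  # unchanged public name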
@@ -326,7 +326,7 @@ def test_model_group_aliases():
         selected_model = router.get_available_deployment("gpt-4")
         print("\n selected model", selected_model)
         selected_model_name = selected_model.get("model_name")
-        if selected_model_name is not "gpt-3.5-turbo":
+        if selected_model_name != "gpt-3.5-turbo":
             pytest.fail(f"Selected model {selected_model_name} is not gpt-3.5-turbo")

         router.reset()
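The last hunk is a real bug fix rather than a rename: "is"/"is not" compare object identity, not value, and whether two equal strings share one object is an interning detail of the interpreter (CPython 3.8+ even emits a SyntaxWarning for identity tests against literals). A quick demonstration:

# "is not" checks identity; "!=" checks value. Equal strings are not
# guaranteed to be the same object, so the old check was unreliable.
a = "gpt-3.5-turbo"
b = "".join(["gpt-3.5-", "turbo"])  # equal value, built at runtime

print(a != b)      # False: values match, so the fixed check behaves correctly
print(a is not b)  # typically True: distinct objects despite equal values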