Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
(Feat) Add logging for POST v1/fine_tuning/jobs (#7426)

* init commit ft jobs logging
* add ft logging
* add logging for FineTuningJob
* simple FT Job create test

This commit is contained in: parent 2dcde8ce2b, commit f7316f517a
5 changed files with 45 additions and 5 deletions
@@ -19,12 +19,16 @@ import httpx
 import litellm
 from litellm._logging import verbose_logger
 from litellm.llms.azure.fine_tuning.handler import AzureOpenAIFineTuningAPI
-from litellm.llms.openai.fine_tuning.handler import FineTuningJob, OpenAIFineTuningAPI
+from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI
 from litellm.llms.vertex_ai.fine_tuning.handler import VertexFineTuningAPI
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import FineTuningJobCreate, Hyperparameters
+from litellm.types.llms.openai import (
+    FineTuningJob,
+    FineTuningJobCreate,
+    Hyperparameters,
+)
 from litellm.types.router import *
-from litellm.utils import supports_httpx_timeout
+from litellm.utils import client, supports_httpx_timeout

 ####### ENVIRONMENT VARIABLES ###################
 openai_fine_tuning_apis_instance = OpenAIFineTuningAPI()
@@ -33,6 +37,7 @@ vertex_fine_tuning_apis_instance = VertexFineTuningAPI()
 #################################################


+@client
 async def acreate_fine_tuning_job(
     model: str,
     training_file: str,
@@ -86,6 +91,7 @@ async def acreate_fine_tuning_job(
         raise e


+@client
 def create_fine_tuning_job(
     model: str,
     training_file: str,
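
The only functional change to this module is the new @client decorator on both create_fine_tuning_job and acreate_fine_tuning_job (imported above via `from litellm.utils import client`), which is how litellm attaches its logging machinery to these entry points. Below is a minimal, hypothetical sketch of the decorator pattern being relied on here, not litellm's actual @client implementation, which also handles async functions, cost tracking, and registered callbacks:

# Hypothetical sketch of a logging decorator; not litellm's real @client.
import functools
import time
from typing import Any, Callable


def logging_client(func: Callable[..., Any]) -> Callable[..., Any]:
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        try:
            result = func(*args, **kwargs)
        except Exception as e:
            # Failure path: report and re-raise, mirroring a failure callback.
            print(f"{func.__name__} failed after {time.time() - start:.2f}s: {e}")
            raise
        # Success path: hand the raw response (e.g. a FineTuningJob) to loggers.
        print(f"{func.__name__} succeeded with result type {type(result).__name__}")
        return result

    return wrapper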
@@ -37,6 +37,7 @@ from litellm.litellm_core_utils.redact_messages import (
 from litellm.types.llms.openai import (
     AllMessageValues,
     Batch,
+    FineTuningJob,
     HttpxBinaryResponseContent,
 )
 from litellm.types.rerank import RerankResponse
@@ -760,6 +761,7 @@ class Logging(LiteLLMLoggingBaseClass):
             HttpxBinaryResponseContent,
             RerankResponse,
             Batch,
+            FineTuningJob,
         ],
         cache_hit: Optional[bool] = None,
     ) -> Optional[float]:
@@ -877,6 +879,7 @@ class Logging(LiteLLMLoggingBaseClass):
             or isinstance(result, HttpxBinaryResponseContent)  # tts
             or isinstance(result, RerankResponse)
             or isinstance(result, Batch)
+            or isinstance(result, FineTuningJob)
         ):
             ## RESPONSE COST ##
             self.model_call_details["response_cost"] = (
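
With FineTuningJob added to this isinstance chain, a successful fine-tuning response is treated like any other loggable result and reaches the response-cost and success-logging branch instead of falling through. A hedged sketch of the gating idea, using hypothetical names rather than the real Logging class:

# Hypothetical illustration: only recognized response types enter the success branch.
from typing import Any


class DemoFineTuningJob:  # stand-in for the openai SDK's FineTuningJob type
    id = "ftjob-123"


LOGGABLE_RESULT_TYPES = (DemoFineTuningJob,)  # the real chain also covers chat, tts, rerank, batch


def handle_success(result: Any, model_call_details: dict) -> None:
    if isinstance(result, LOGGABLE_RESULT_TYPES):
        # Fine-tuning job creation has no token usage, so the computed cost may be None or 0.
        model_call_details["response_cost"] = None
        model_call_details["response"] = result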
@@ -30,6 +30,7 @@ from openai.types.chat.chat_completion_prediction_content_param import (
     ChatCompletionPredictionContentParam,
 )
 from openai.types.embedding import Embedding as OpenAIEmbedding
+from openai.types.fine_tuning.fine_tuning_job import FineTuningJob
 from pydantic import BaseModel, Field
 from typing_extensions import Dict, Required, TypedDict, override

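
The type itself comes straight from the openai SDK; re-exporting it through litellm's types module is what lets the logging code above import it from one place. For example:

# Both names refer to the same class after this change; litellm-internal code
# uses the first form rather than reaching into the openai SDK path directly.
from litellm.types.llms.openai import FineTuningJob
from openai.types.fine_tuning.fine_tuning_job import FineTuningJob as OpenAIFineTuningJob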
@@ -1234,6 +1234,7 @@ def _is_async_request(
         or kwargs.get("arerank", False) is True
         or kwargs.get("_arealtime", False) is True
         or kwargs.get("acreate_batch", False) is True
+        or kwargs.get("acreate_fine_tuning_job", False) is True
         or is_pass_through is True
     ):
         return True
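
litellm's async wrappers typically mark the call by setting a flag in kwargs before delegating to the shared code path, so _is_async_request just needs to know every such flag; this hunk teaches it the one for acreate_fine_tuning_job. A compact sketch of that pattern, with a hypothetical function name:

# Hypothetical restatement of the flag-based async check extended in this hunk.
def is_async_request_sketch(kwargs: dict, is_pass_through: bool = False) -> bool:
    async_flags = (
        "arerank",
        "_arealtime",
        "acreate_batch",
        "acreate_fine_tuning_job",  # new in this commit
    )
    return is_pass_through or any(kwargs.get(flag, False) is True for flag in async_flags)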
@@ -1,7 +1,7 @@
 import os
 import sys
 import traceback
+import json
 import pytest

 sys.path.insert(
@@ -14,7 +14,7 @@ import litellm
 litellm.num_retries = 0
 import asyncio
 import logging
+from typing import Optional
 import openai
 from test_openai_batches_and_files import load_vertex_ai_credentials

@@ -24,10 +24,27 @@ from litellm.llms.vertex_ai.fine_tuning.handler import (
     FineTuningJobCreate,
     VertexFineTuningAPI,
 )
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.types.utils import StandardLoggingPayload

 vertex_finetune_api = VertexFineTuningAPI()


+class TestCustomLogger(CustomLogger):
+    def __init__(self):
+        super().__init__()
+        self.standard_logging_object: Optional[StandardLoggingPayload] = None
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(
+            "Success event logged with kwargs=",
+            kwargs,
+            "and response_obj=",
+            response_obj,
+        )
+        self.standard_logging_object = kwargs["standard_logging_object"]
+
+
 def test_create_fine_tune_job():
     try:
         verbose_logger.setLevel(logging.DEBUG)
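
The new TestCustomLogger simply captures the standard_logging_object that the success callback receives. Outside the test suite, the same pattern can observe fine-tuning job creation; the snippet below is a usage sketch built only from pieces shown in this diff (CustomLogger, litellm.callbacks, acreate_fine_tuning_job), with the training file ID left as a placeholder:

import asyncio
from typing import Optional

import litellm
from litellm.integrations.custom_logger import CustomLogger
from litellm.types.utils import StandardLoggingPayload


class FineTuningJobLogger(CustomLogger):
    def __init__(self):
        super().__init__()
        self.standard_logging_object: Optional[StandardLoggingPayload] = None

    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
        # kwargs carries the standard logging payload built for this call.
        self.standard_logging_object = kwargs["standard_logging_object"]


async def main():
    logger = FineTuningJobLogger()
    litellm.callbacks = [logger]  # register alongside any other callbacks

    job = await litellm.acreate_fine_tuning_job(
        model="gpt-3.5-turbo-0125",
        training_file="file-abc123",  # placeholder: a previously uploaded training file ID
        custom_llm_provider="openai",
    )
    await asyncio.sleep(2)  # give the async success callback a moment to fire
    print(job.id, logger.standard_logging_object is not None)


# asyncio.run(main())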
@@ -89,6 +106,8 @@ def test_create_fine_tune_job():
 @pytest.mark.asyncio
 async def test_create_fine_tune_jobs_async():
     try:
+        custom_logger = TestCustomLogger()
+        litellm.callbacks = ["datadog", custom_logger]
         verbose_logger.setLevel(logging.DEBUG)
         file_name = "openai_batch_completions.jsonl"
         _current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -113,6 +132,16 @@ async def test_create_fine_tune_jobs_async():
         assert create_fine_tuning_response.id is not None
         assert create_fine_tuning_response.model == "gpt-3.5-turbo-0125"

+        await asyncio.sleep(2)
+        _logged_standard_logging_object = custom_logger.standard_logging_object
+        assert _logged_standard_logging_object is not None
+        print(
+            "custom_logger.standard_logging_object=",
+            json.dumps(_logged_standard_logging_object, indent=4),
+        )
+        assert _logged_standard_logging_object["model"] == "gpt-3.5-turbo-0125"
+        assert _logged_standard_logging_object["id"] == create_fine_tuning_response.id
+
         # list fine tuning jobs
         print("listing ft jobs")
         ft_jobs = await litellm.alist_fine_tuning_jobs(limit=2)
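
The closing assertions tie the logged payload back to the API response: the captured standard logging object is expected to carry the same model and job id as the FineTuningJob returned by the create call. Restated compactly as a helper, under the same names the test uses:

def assert_fine_tuning_job_was_logged(create_fine_tuning_response, standard_logging_object) -> None:
    # The payload captured by the custom logger should mirror the returned FineTuningJob.
    assert standard_logging_object is not None
    assert standard_logging_object["model"] == create_fine_tuning_response.model
    assert standard_logging_object["id"] == create_fine_tuning_response.id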