diff --git a/litellm/fine_tuning/main.py b/litellm/fine_tuning/main.py
index eace2f64a4..7672ad43a9 100644
--- a/litellm/fine_tuning/main.py
+++ b/litellm/fine_tuning/main.py
@@ -19,12 +19,16 @@ import httpx
 import litellm
 from litellm._logging import verbose_logger
 from litellm.llms.azure.fine_tuning.handler import AzureOpenAIFineTuningAPI
-from litellm.llms.openai.fine_tuning.handler import FineTuningJob, OpenAIFineTuningAPI
+from litellm.llms.openai.fine_tuning.handler import OpenAIFineTuningAPI
 from litellm.llms.vertex_ai.fine_tuning.handler import VertexFineTuningAPI
 from litellm.secret_managers.main import get_secret_str
-from litellm.types.llms.openai import FineTuningJobCreate, Hyperparameters
+from litellm.types.llms.openai import (
+    FineTuningJob,
+    FineTuningJobCreate,
+    Hyperparameters,
+)
 from litellm.types.router import *
-from litellm.utils import supports_httpx_timeout
+from litellm.utils import client, supports_httpx_timeout
 
 ####### ENVIRONMENT VARIABLES ###################
 openai_fine_tuning_apis_instance = OpenAIFineTuningAPI()
@@ -33,6 +37,7 @@ vertex_fine_tuning_apis_instance = VertexFineTuningAPI()
 #################################################
 
 
+@client
 async def acreate_fine_tuning_job(
     model: str,
     training_file: str,
@@ -86,6 +91,7 @@
         raise e
 
 
+@client
 def create_fine_tuning_job(
     model: str,
     training_file: str,
diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index ee769bb7eb..0736aa2da5 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -37,6 +37,7 @@ from litellm.litellm_core_utils.redact_messages import (
 from litellm.types.llms.openai import (
     AllMessageValues,
     Batch,
+    FineTuningJob,
     HttpxBinaryResponseContent,
 )
 from litellm.types.rerank import RerankResponse
@@ -760,6 +761,7 @@ class Logging(LiteLLMLoggingBaseClass):
             HttpxBinaryResponseContent,
             RerankResponse,
             Batch,
+            FineTuningJob,
         ],
         cache_hit: Optional[bool] = None,
     ) -> Optional[float]:
@@ -877,6 +879,7 @@ class Logging(LiteLLMLoggingBaseClass):
                 or isinstance(result, HttpxBinaryResponseContent)  # tts
                 or isinstance(result, RerankResponse)
                 or isinstance(result, Batch)
+                or isinstance(result, FineTuningJob)
             ):
                 ## RESPONSE COST ##
                 self.model_call_details["response_cost"] = (
diff --git a/litellm/types/llms/openai.py b/litellm/types/llms/openai.py
index e8cda0aae4..b63099f081 100644
--- a/litellm/types/llms/openai.py
+++ b/litellm/types/llms/openai.py
@@ -30,6 +30,7 @@
 from openai.types.chat.chat_completion_prediction_content_param import (
     ChatCompletionPredictionContentParam,
 )
 from openai.types.embedding import Embedding as OpenAIEmbedding
+from openai.types.fine_tuning.fine_tuning_job import FineTuningJob
 from pydantic import BaseModel, Field
 from typing_extensions import Dict, Required, TypedDict, override
diff --git a/litellm/utils.py b/litellm/utils.py
index e7d7f97ee0..72f5f80933 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1234,6 +1234,7 @@ def _is_async_request(
         or kwargs.get("arerank", False) is True
         or kwargs.get("_arealtime", False) is True
         or kwargs.get("acreate_batch", False) is True
+        or kwargs.get("acreate_fine_tuning_job", False) is True
         or is_pass_through is True
     ):
         return True
diff --git a/tests/batches_tests/test_fine_tuning_api.py b/tests/batches_tests/test_fine_tuning_api.py
index 893849b3b5..cc53f599fa 100644
--- a/tests/batches_tests/test_fine_tuning_api.py
+++ b/tests/batches_tests/test_fine_tuning_api.py
@@ -1,7 +1,7 @@
 import os
 import sys
 import traceback
-
+import json
 import pytest
 
 sys.path.insert(
@@ -14,7 +14,7 @@ import litellm
 litellm.num_retries = 0
 import asyncio
 import logging
-
+from typing import Optional
 import openai
 from test_openai_batches_and_files import load_vertex_ai_credentials
 
@@ -24,10 +24,27 @@ from litellm.llms.vertex_ai.fine_tuning.handler import (
     FineTuningJobCreate,
     VertexFineTuningAPI,
 )
+from litellm.integrations.custom_logger import CustomLogger
+from litellm.types.utils import StandardLoggingPayload
 
 vertex_finetune_api = VertexFineTuningAPI()
 
 
+class TestCustomLogger(CustomLogger):
+    def __init__(self):
+        super().__init__()
+        self.standard_logging_object: Optional[StandardLoggingPayload] = None
+
+    async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
+        print(
+            "Success event logged with kwargs=",
+            kwargs,
+            "and response_obj=",
+            response_obj,
+        )
+        self.standard_logging_object = kwargs["standard_logging_object"]
+
+
 def test_create_fine_tune_job():
     try:
         verbose_logger.setLevel(logging.DEBUG)
@@ -89,6 +106,8 @@ def test_create_fine_tune_job():
 @pytest.mark.asyncio
 async def test_create_fine_tune_jobs_async():
     try:
+        custom_logger = TestCustomLogger()
+        litellm.callbacks = ["datadog", custom_logger]
         verbose_logger.setLevel(logging.DEBUG)
         file_name = "openai_batch_completions.jsonl"
         _current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -113,6 +132,16 @@
         assert create_fine_tuning_response.id is not None
         assert create_fine_tuning_response.model == "gpt-3.5-turbo-0125"
 
+        await asyncio.sleep(2)
+        _logged_standard_logging_object = custom_logger.standard_logging_object
+        assert _logged_standard_logging_object is not None
+        print(
+            "custom_logger.standard_logging_object=",
+            json.dumps(_logged_standard_logging_object, indent=4),
+        )
+        assert _logged_standard_logging_object["model"] == "gpt-3.5-turbo-0125"
+        assert _logged_standard_logging_object["id"] == create_fine_tuning_response.id
+
         # list fine tuning jobs
         print("listing ft jobs")
         ft_jobs = await litellm.alist_fine_tuning_jobs(limit=2)