from typing import Any, Coroutine, Optional, Union, Dict, List

import logging
import time
from dataclasses import dataclass

# Optional heavy dependencies; the module should still import in environments
# without torch/diffusers/transformers installed.
try:
    import torch
    from diffusers import UNet2DConditionModel
    from diffusers.optimization import get_scheduler
    from transformers import CLIPTextModel, CLIPTokenizer
except ImportError:
    pass

verbose_logger = logging.getLogger(__name__)


@dataclass
class FineTuningJob:
    """Minimal record of a locally run fine-tuning job."""

    id: str
    status: str
    model: str
    created_at: int
    hyperparameters: Dict[str, Any]
    result_files: List[str]


class DiffusersFineTuningAPI:
    """
    Diffusers implementation for fine-tuning stable diffusion models locally
    """

    def __init__(self) -> None:
        self.jobs: Dict[str, FineTuningJob] = {}
        super().__init__()

    async def _train_diffusers_model(
        self,
        training_data: str,
        base_model: str = "stabilityai/stable-diffusion-2",
        output_dir: str = "./fine_tuned_model",
        learning_rate: float = 5e-6,
        train_batch_size: int = 1,
        max_train_steps: int = 500,
        gradient_accumulation_steps: int = 1,
        mixed_precision: str = "fp16",
    ) -> FineTuningJob:
        """Training implementation for diffusers (the training loop below is simulated)."""
        job_id = f"ftjob_{len(self.jobs)+1}"
        job = FineTuningJob(
            id=job_id,
            status="running",
            model=base_model,
            created_at=int(time.time()),
            hyperparameters={
                "learning_rate": learning_rate,
                "batch_size": train_batch_size,
                "steps": max_train_steps,
            },
            result_files=[output_dir],
        )
        self.jobs[job_id] = job

        try:
            # Load the individual pipeline components from the base checkpoint
            tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder="tokenizer")
            text_encoder = CLIPTextModel.from_pretrained(
                base_model, subfolder="text_encoder"
            )
            unet = UNet2DConditionModel.from_pretrained(base_model, subfolder="unet")
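
            # Note: a complete text-to-image fine-tune would also load the VAE
            # and the noise scheduler from the same checkpoint. A hedged sketch,
            # assuming the standard diffusers subfolder layout (not used by the
            # simulated loop below):
            #   from diffusers import AutoencoderKL, DDPMScheduler
            #   vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae")
            #   noise_scheduler = DDPMScheduler.from_pretrained(
            #       base_model, subfolder="scheduler"
            #   )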

            # Optimizer and scheduler
            optimizer = torch.optim.AdamW(
                unet.parameters(),
                lr=learning_rate,
            )

            lr_scheduler = get_scheduler(
                "linear",
                optimizer=optimizer,
                num_warmup_steps=0,
                num_training_steps=max_train_steps,
            )

            # The real training loop would go here. This is simplified; a full
            # implementation would need:
            # 1. Data loading from the training_data path
            # 2. A proper training loop with forward/backward passes
            # 3. Checkpoint saving
            # A hedged sketch of a single training step is outlined below.
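
            # A minimal, non-executed sketch of one denoising training step,
            # assuming a DDPMScheduler (`noise_scheduler`), an AutoencoderKL
            # (`vae`), and a dataloader built from `training_data` that yields
            # batches with "pixel_values" and "input_ids". These names are
            # illustrative assumptions; the function is defined only as
            # documentation and is never called by the simulated loop.
            def _example_training_step(batch, noise_scheduler, vae):
                # Encode images to latents and add noise at a random timestep
                latents = vae.encode(batch["pixel_values"]).latent_dist.sample()
                latents = latents * vae.config.scaling_factor
                noise = torch.randn_like(latents)
                timesteps = torch.randint(
                    0,
                    noise_scheduler.config.num_train_timesteps,
                    (latents.shape[0],),
                    device=latents.device,
                ).long()
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                # Predict the noise with the UNet conditioned on the captions
                encoder_hidden_states = text_encoder(batch["input_ids"])[0]
                model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample

                # Standard epsilon-prediction objective, then optimizer step
                loss = torch.nn.functional.mse_loss(model_pred.float(), noise.float())
                loss.backward()
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                return loss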

            # Simulate training progress (no actual optimization happens here)
            for step in range(max_train_steps):
                if step % 10 == 0:
                    verbose_logger.debug(f"Training step {step}/{max_train_steps}")

            # Save the trained model
            unet.save_pretrained(f"{output_dir}/unet")
            text_encoder.save_pretrained(f"{output_dir}/text_encoder")

            job.status = "succeeded"
            return job

        except Exception as e:
            job.status = "failed"
            verbose_logger.error(f"Training failed: {str(e)}")
            raise

    async def acreate_fine_tuning_job(
        self,
        create_fine_tuning_job_data: dict,
    ) -> FineTuningJob:
        """Create a fine-tuning job asynchronously.

        The dict keys are passed through as keyword arguments to
        _train_diffusers_model (e.g. training_data, base_model, output_dir).
        """
        return await self._train_diffusers_model(**create_fine_tuning_job_data)

    def create_fine_tuning_job(
        self,
        _is_async: bool,
        create_fine_tuning_job_data: dict,
        **kwargs,
    ) -> Union[FineTuningJob, Coroutine[Any, Any, FineTuningJob]]:
        """Create a fine-tuning job (sync or async)"""
        if _is_async:
            return self.acreate_fine_tuning_job(create_fine_tuning_job_data)
        else:
            # Run the async implementation synchronously (requires that no
            # event loop is already running in this thread)
            import asyncio

            return asyncio.run(
                self.acreate_fine_tuning_job(create_fine_tuning_job_data)
            )

    async def alist_fine_tuning_jobs(
        self,
        after: Optional[str] = None,
        limit: Optional[int] = None,
    ):
        """List fine-tuning jobs asynchronously"""
        jobs = list(self.jobs.values())
        if after:
            # `after` acts as an id cursor; ids are compared lexicographically
            jobs = [j for j in jobs if j.id > after]
        if limit:
            jobs = jobs[:limit]
        return {"data": jobs}

    def list_fine_tuning_jobs(
        self,
        _is_async: bool,
        after: Optional[str] = None,
        limit: Optional[int] = None,
        **kwargs,
    ):
        """List fine-tuning jobs (sync or async)"""
        if _is_async:
            return self.alist_fine_tuning_jobs(after=after, limit=limit)
        else:
            # Run async code synchronously
            import asyncio

            return asyncio.run(self.alist_fine_tuning_jobs(after=after, limit=limit))

    async def aretrieve_fine_tuning_job(
        self,
        fine_tuning_job_id: str,
    ) -> FineTuningJob:
        """Retrieve a fine-tuning job asynchronously"""
        if fine_tuning_job_id not in self.jobs:
            raise ValueError(f"Job {fine_tuning_job_id} not found")
        return self.jobs[fine_tuning_job_id]

    def retrieve_fine_tuning_job(
        self,
        _is_async: bool,
        fine_tuning_job_id: str,
        **kwargs,
    ):
        """Retrieve a fine-tuning job (sync or async)"""
        if _is_async:
            return self.aretrieve_fine_tuning_job(fine_tuning_job_id)
        else:
            # Run async code synchronously
            import asyncio

            return asyncio.run(self.aretrieve_fine_tuning_job(fine_tuning_job_id))

    async def acancel_fine_tuning_job(
        self,
        fine_tuning_job_id: str,
    ) -> FineTuningJob:
        """Cancel a fine-tuning job asynchronously"""
        if fine_tuning_job_id not in self.jobs:
            raise ValueError(f"Job {fine_tuning_job_id} not found")

        job = self.jobs[fine_tuning_job_id]
        if job.status in ["succeeded", "failed", "cancelled"]:
            raise ValueError(f"Cannot cancel job in status {job.status}")

        job.status = "cancelled"
        return job

    def cancel_fine_tuning_job(
        self,
        _is_async: bool,
        fine_tuning_job_id: str,
        **kwargs,
    ):
        """Cancel a fine-tuning job (sync or async)"""
        if _is_async:
            return self.acancel_fine_tuning_job(fine_tuning_job_id)
        else:
            # Run async code synchronously
            import asyncio

            return asyncio.run(self.acancel_fine_tuning_job(fine_tuning_job_id))
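

if __name__ == "__main__":
    # A hedged usage sketch, not part of the original module: it shows how the
    # sync/async dispatch and the job-data dict are expected to fit together.
    # The "./training_images" path and the hyperparameter values below are
    # illustrative assumptions.
    api = DiffusersFineTuningAPI()

    job = api.create_fine_tuning_job(
        _is_async=False,
        create_fine_tuning_job_data={
            "training_data": "./training_images",
            "base_model": "stabilityai/stable-diffusion-2",
            "output_dir": "./fine_tuned_model",
            "learning_rate": 5e-6,
            "max_train_steps": 50,
        },
    )
    print(job.id, job.status)

    # Listing and retrieval use the same _is_async flag
    print(api.list_fine_tuning_jobs(_is_async=False, limit=10))
    print(api.retrieve_fine_tuning_job(_is_async=False, fine_tuning_job_id=job.id))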