import os
Denoising Diffusion Probabilistic Models with miniai
- skip_showdoc: true
Imports
import pickle,gzip,math,os,time,shutil,torch,random,logging
import fastcore.all as fc,matplotlib as mpl,numpy as np,matplotlib.pyplot as plt
from collections.abc import Mapping
from pathlib import Path
from functools import partial
from fastcore.foundation import L
import torchvision.transforms.functional as TF,torch.nn.functional as F
from torch import tensor,nn,optim
from torch.utils.data import DataLoader,default_collate
from torch.nn import init
from torch.optim import lr_scheduler
from fastAIcourse.datasets import *
from fastAIcourse.conv import *
from fastAIcourse.learner import *
from fastAIcourse.activations import *
from fastAIcourse.init import *
from fastAIcourse.sgd import *
from fastAIcourse.resnet import *
from fastAIcourse.augment import *
from fastAIcourse.accel import *
from torcheval.metrics import MulticlassAccuracy
from datasets import load_dataset,load_dataset_builder
# Notebook configuration: reversed grayscale colormap for image display,
# quieter logging, a fixed seed for reproducibility, and a cap on the
# number of DataLoader workers.
mpl.rcParams['image.cmap'] = 'gray_r'
logging.disable(logging.WARNING)
set_seed(42)
if fc.defaults.cpus>8: fc.defaults.cpus=8
# Load Fashion-MNIST via HuggingFace `datasets`; xl/yl name the image and
# label columns used throughout the notebook.
xl,yl = 'image','label'
name = "fashion_mnist"
dsd = load_dataset(name)
@inplace
def transformi(b):
    # Convert each PIL image to a tensor, pad 28x28 -> 32x32, and shift
    # pixel values to be centred on zero (roughly [-0.5, 0.5]).
    b[xl] = [F.pad(TF.to_tensor(img), (2,2,2,2)) - 0.5 for img in b[xl]]
# Build the padded/normalised datasets and miniai DataLoaders.
bs = 256
tds = dsd.with_transform(transformi)
dls = DataLoaders.from_dd(tds, bs, num_workers=8)
from types import SimpleNamespace
def linear_sched(betamin=0.0001, betamax=0.02, n_steps=1000):
    """DDPM linear noise schedule.

    Returns a SimpleNamespace with:
      a    -- alpha_t = 1 - beta_t, per step
      abar -- cumulative product of the alphas (alpha-bar)
      sig  -- sigma_t = sqrt(beta_t), the per-step sampling noise scale
    """
    beta = torch.linspace(betamin, betamax, n_steps)
    return SimpleNamespace(a=1.-beta, abar=(1.-beta).cumprod(dim=0), sig=beta.sqrt())
def abar(t, T): return (t/T*math.pi/2).cos()**2
def cos_sched(n_steps=1000):
    """Cosine noise schedule (Nichol & Dhariwal style): alpha-bar is the primary
    quantity and the per-step alpha is the ratio of successive alpha-bars.
    Returns the same (a, abar, sig) namespace shape as `linear_sched`."""
    ts = torch.linspace(0, n_steps-1, n_steps)
    ab = abar(ts, n_steps)
    alp = ab/abar(ts-1, n_steps)
    return SimpleNamespace(a=alp, abar=ab, sig=(1-alp).sqrt())
# Compare the linear and cosine schedules: the alpha-bar curves first, then
# their step-to-step differences (the effective noise added per step).
lin_abar = linear_sched().abar
cos_abar = cos_sched().abar
plt.plot(lin_abar, label='lin')
plt.plot(cos_abar, label='cos')
plt.legend()

plt.plot(lin_abar[1:]-lin_abar[:-1], label='lin')
plt.plot(cos_abar[1:]-cos_abar[:-1], label='cos')
plt.legend()

# With a smaller betamax the linear schedule tracks the cosine one more closely.
lin_abar = linear_sched(betamax=0.01).abar
plt.plot(lin_abar, label='lin')
plt.plot(cos_abar, label='cos')
plt.legend()

plt.plot(lin_abar[1:]-lin_abar[:-1], label='lin')
plt.plot(cos_abar[1:]-cos_abar[:-1], label='cos')
plt.legend()
# Global noise schedule used by noisify() and all the samplers below.
n_steps = 1000
lin_abar = linear_sched(betamax=0.01)
alphabar = lin_abar.abar
alpha = lin_abar.a
sigma = lin_abar.sig
def noisify(x0, ᾱ):
    """Draw one random timestep per image and noise the batch accordingly.

    x0 -- clean image batch; ᾱ -- cumulative-alpha schedule (indexed by timestep).
    Returns ((xt, t), ε): the model's input pair (noised images, timesteps) and
    the noise target for the MSE loss. Uses the global `n_steps` for the
    timestep range. Assumes x0 is 4-D (batch, c, h, w) given the reshape below.
    """
    device = x0.device
    n = len(x0)
    t = torch.randint(0, n_steps, (n,), dtype=torch.long)
    ε = torch.randn(x0.shape, device=device)
    # Broadcast each image's ᾱ value over its (c, h, w) dims.
    ᾱ_t = ᾱ[t].reshape(-1, 1, 1, 1).to(device)
    xt = ᾱ_t.sqrt()*x0 + (1-ᾱ_t).sqrt()*ε
    return (xt, t.to(device)), ε
# Sanity-check: noisify one training batch and inspect the drawn timesteps.
dt = dls.train
xb,yb = next(iter(dt))
(xt,t),ε = noisify(xb[:25], alphabar)
t
tensor([ 26, 335, 620, 924, 950, 113, 378, 14, 210, 954, 231, 572, 315, 295, 567, 706, 749, 876, 73, 111, 899, 213, 541, 769, 287])
# Show the noised images titled with their timestep.
titles = fc.map_ex(t[:25], '{}')
show_images(xt[:25], imsize=1.5, titles=titles)
Training
from diffusers import UNet2DModel
class UNet(UNet2DModel):
    "UNet2DModel variant whose forward accepts the (xt, t) tuple produced by `noisify`."
    def forward(self, x):
        # Unpack the tuple and return only the predicted-noise tensor.
        return super().forward(*x).sample
def init_ddpm(model):
    """DDPM-style initialisation: zero the second conv of every resnet block and
    the final output conv (residual paths start as identity), and orthogonally
    initialise the downsampler convs."""
    for o in model.down_blocks:
        for p in o.resnets: p.conv2.weight.data.zero_()
        for p in fc.L(o.downsamplers): init.orthogonal_(p.conv.weight)
    for o in model.up_blocks:
        for p in o.resnets: p.conv2.weight.data.zero_()
    model.conv_out.weight.data.zero_()
def collate_ddpm(b): return noisify(default_collate(b)[xl], alphabar)
def dl_ddpm(ds, nw=4): return DataLoader(ds, batch_size=bs, collate_fn=collate_ddpm, num_workers=nw)
# Train the noise-prediction UNet: one-cycle LR, AdamW (larger eps for mixed
# precision stability), mixed precision, and per-batch LR scheduling.
dls = DataLoaders(dl_ddpm(tds['train']), dl_ddpm(tds['test']))
lr = 1e-2
epochs = 4
opt_func = partial(optim.AdamW, eps=1e-5)
tmax = epochs * len(dls.train)
sched = partial(lr_scheduler.OneCycleLR, max_lr=lr, total_steps=tmax)
cbs = [DeviceCB(), MixedPrecision(), ProgressCB(plot=True), MetricsCB(), BatchSchedCB(sched)]
model = UNet(in_channels=1, out_channels=1, block_out_channels=(16, 32, 64, 128), norm_num_groups=8)
init_ddpm(model)
learn = Learner(model, dls, nn.MSELoss(), lr=lr, cbs=cbs, opt_func=opt_func)
learn.fit(epochs)
| loss | epoch | train |
|---|---|---|
| 0.158 | 0 | train |
| 0.038 | 0 | eval |
| 0.032 | 1 | train |
| 0.030 | 1 | eval |
| 0.028 | 2 | train |
| 0.027 | 2 | eval |
| 0.026 | 3 | train |
| 0.026 | 3 | eval |
# Persist the trained model, then reload it onto the GPU.
mdl_path = Path('models')
torch.save(learn.model, mdl_path/'fashion_ddpm3_25.pkl')
# NOTE(review): torch.load unpickles arbitrary objects -- only load checkpoint
# files you trust (here it is the file saved on the previous line).
model = torch.load(mdl_path/'fashion_ddpm3_25.pkl').cuda()
@torch.no_grad()
def sample(model, sz):
    """DDPM ancestral sampling: start from pure Gaussian noise of shape `sz`
    and denoise through all timesteps, collecting every intermediate x_t.
    Uses the global schedule (n_steps, alphabar, alpha, sigma).
    Returns the list of per-step tensors on the CPU; the last is the sample."""
    ps = next(model.parameters())
    x_t = torch.randn(sz).to(ps)
    preds = []
    for t in reversed(range(n_steps)):
        t_batch = torch.full((x_t.shape[0],), t, device=ps.device, dtype=torch.long)
        # No extra noise on the final (t == 0) step.
        z = (torch.randn(x_t.shape) if t > 0 else torch.zeros(x_t.shape)).to(ps)
        ᾱ_t1 = alphabar[t-1] if t > 0 else torch.tensor(1)
        b̄_t = 1-alphabar[t]
        b̄_t1 = 1-ᾱ_t1
        noise = model((x_t, t_batch))
        # Estimate x_0 from the predicted noise, then form the posterior mean
        # of x_{t-1} and add the scheduled noise.
        x_0_hat = ((x_t - b̄_t.sqrt() * noise)/alphabar[t].sqrt())
        x_t = x_0_hat * ᾱ_t1.sqrt()*(1-alpha[t])/b̄_t + x_t * alpha[t].sqrt()*b̄_t1/b̄_t + sigma[t]*z
        preds.append(x_t.float().cpu())
    return preds
# Generate a batch of samples and inspect them.
n_samples = 128
samples = sample(model, (n_samples, 1, 32, 32))
# CPU times: user 52.9 s, sys: 4.74 s, total: 57.6 s
# Wall time: 57.3 s
# Rescale by 2: training data was in [-0.5, 0.5]; presumably the FID model
# below expects [-1, 1] (see transformi2) -- clamp left disabled on purpose.
s = (samples[-1]*2)  # .clamp(-1,1)
s.min(),s.max()
# (tensor(-1.20), tensor(1.56))
show_images(s[:16], imsize=1.5)
@inplace
def transformi2(b):
    # Same 28x28 -> 32x32 padding as `transformi`, but scale pixels to
    # [-1, 1] instead of [-0.5, 0.5].
    b[xl] = [F.pad(TF.to_tensor(img), (2,2,2,2))*2-1 for img in b[xl]]
# FID evaluation setup: dataloaders scaled to [-1, 1], plus a previously
# trained classifier with its last two layers removed to act as the
# feature extractor.
tds2 = dsd.with_transform(transformi2)
dls2 = DataLoaders.from_dd(tds2, bs, num_workers=fc.defaults.cpus)
cmodel = torch.load('models/data_aug2.pkl')
del(cmodel[8])
del(cmodel[7])
from fastAIcourse.fid import ImageEval
ie = ImageEval(cmodel, dls2, cbs=[DeviceCB()])
# FID of the generated samples vs. a real (rescaled) batch as a baseline.
ie.fid(s)
# 1753.90234375
s.min(),s.max()
# (tensor(-7.90), tensor(7.56))
ie.fid(xb*2)
# 13.9842529296875
Skip sampling
@torch.no_grad()
def sample_skip(model, sz):
    """Faster DDPM sampling: refresh the model's noise prediction only every
    3rd step (and on every step once t < 50), reusing the previous prediction
    in between. Otherwise identical to `sample`."""
    ps = next(model.parameters())
    x_t = torch.randn(sz).to(ps)
    preds = []
    for t in reversed(range(n_steps)):
        t_batch = torch.full((x_t.shape[0],), t, device=ps.device, dtype=torch.long)
        z = (torch.randn(x_t.shape) if t > 0 else torch.zeros(x_t.shape)).to(ps)
        ᾱ_t1 = alphabar[t-1] if t > 0 else torch.tensor(1)
        b̄_t = 1-alphabar[t]
        b̄_t1 = 1-ᾱ_t1
        # First iteration is t == n_steps-1 == 999, which satisfies t%3==0,
        # so `noise` is always bound before its first use below.
        if t%3==0 or t<50: noise = model((x_t, t_batch))
        x_0_hat = ((x_t - b̄_t.sqrt() * noise)/alphabar[t].sqrt())
        x_t = x_0_hat * ᾱ_t1.sqrt()*(1-alpha[t])/b̄_t + x_t * alpha[t].sqrt()*b̄_t1/b̄_t + sigma[t]*z
        preds.append(x_t.cpu().float())
    return preds
# Roughly 3x faster than full sampling, at some cost in FID.
samples = sample_skip(model, (n_samples, 1, 32, 32))
# CPU times: user 20.6 s, sys: 1.71 s, total: 22.3 s
# Wall time: 22 s
s = (samples[-1]*2)  # .clamp(-1,1)
show_images(s[:25], imsize=1.5)
ie.fid(s)
# 60.546875
@torch.no_grad()
def sample2(model, sz):
    """DDPM sampling over a precomputed subset of timesteps: the model is only
    called (and an intermediate stored) for t in `sample_at`, which keeps every
    step for small t and becomes increasingly sparse for large t."""
    ps = next(model.parameters())
    x_t = torch.randn(sz).to(ps)
    # (t+101)//100 is the stride: 1 for t < 99 (every step kept), growing by
    # one per additional hundred steps. t == n_steps-1 == 999 is included
    # (1100 % 11 == 0), so `noise` is bound on the first iteration.
    sample_at = {t for t in range(n_steps) if (t+101)%((t+101)//100)==0}
    preds = []
    for t in reversed(range(n_steps)):
        t_batch = torch.full((x_t.shape[0],), t, device=ps.device, dtype=torch.long)
        z = (torch.randn(x_t.shape) if t > 0 else torch.zeros(x_t.shape)).to(ps)
        ᾱ_t1 = alphabar[t-1] if t > 0 else torch.tensor(1)
        b̄_t = 1-alphabar[t]
        b̄_t1 = 1-ᾱ_t1
        if t in sample_at: noise = model((x_t, t_batch))
        x_0_hat = ((x_t - b̄_t.sqrt() * noise)/alphabar[t].sqrt())
        x_t = x_0_hat * ᾱ_t1.sqrt()*(1-alpha[t])/b̄_t + x_t * alpha[t].sqrt()*b̄_t1/b̄_t + sigma[t]*z
        if t in sample_at: preds.append(x_t.float().cpu())
    return preds
# Slightly faster still; FID comparable to the modulo-skip variant.
samples = sample2(model, (n_samples, 1, 32, 32))
# CPU times: user 17.7 s, sys: 1.76 s, total: 19.4 s
# Wall time: 17.9 s
s = (samples[-1]*2)  # .clamp(-1,1)
show_images(s[:25], imsize=1.5)
ie.fid(s)
# 64.6614990234375