'''Adversarial Interpolation Training'''
from __future__ import print_function
import time
import copy
import os
import argparse

import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from tqdm import tqdm

import utils
from utils import softCrossEntropy
from utils import one_hot_tensor
from adv_interp import adv_interp
from networks import *
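
# Command-line options.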
parser = argparse.ArgumentParser(
    description='Adversarial Interpolation Training')
parser.register('type', 'bool', utils.str2bool)
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--label_adv_delta',
                    default=0.5,
                    type=float,
                    help='bound on the adversarial label perturbation')
parser.add_argument('--resume',
                    '-r',
                    action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--model_dir', type=str, help='model path')
parser.add_argument('--init_model_pass',
                    default='-1',
                    type=str,
                    help='checkpoint to resume from ("latest" or an epoch number)')
parser.add_argument('--save_epochs', default=10, type=int, help='save period')
parser.add_argument('--max_epoch', default=200, type=int, help='max number of epochs')
parser.add_argument('--decay_epoch1', default=60, type=int, help='first lr decay epoch')
parser.add_argument('--decay_epoch2', default=90, type=int, help='second lr decay epoch')
parser.add_argument('--decay_rate',
                    default=0.1,
                    type=float,
                    help='lr decay rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay',
                    default=2e-4,
                    type=float,
                    help='weight decay factor')
parser.add_argument('--log_step', default=10, type=int, help='log step')
parser.add_argument('--num_classes', default=10, type=int, help='num classes')
parser.add_argument('--image_size', default=32, type=int, help='image size')
parser.add_argument('--batch_size_train',
                    default=128,
                    type=int,
                    help='batch size for training')

args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
start_epoch = 1
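
# CIFAR-10 training data. Normalizing with mean 0.5 and std 0.5 per channel
# maps pixel values into [-1, 1], matching v_min/v_max in config_adv_interp.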
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

trainset = torchvision.datasets.CIFAR10(root='./data',
                                        train=True,
                                        download=True,
                                        transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=args.batch_size_train,
                                          shuffle=True,
                                          num_workers=2)
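
# Build the WideResNet-28-10 classifier.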
print('======= WideResNet 28-10 ========')
net = WideResNet(depth=28, num_classes=args.num_classes, widen_factor=10)
net = net.to(device)
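
# Hyperparameters for the adversarial interpolation step. epsilon and
# step_size equal 8/255 on a [0, 1] pixel scale, doubled here because the
# normalized inputs span [-1, 1] (a range of width 2).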
config_adv_interp = {
    'v_min': -1.0,
    'v_max': 1.0,
    'epsilon': 8.0 / 255 * 2,
    'num_steps': 1,
    'step_size': 8.0 / 255 * 2,
    'label_adv_delta': args.label_adv_delta,
}
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)
# Optionally resume from a checkpoint: either the rolling 'latest' file or a
# specific 'checkpoint-<pass>' file inside model_dir.
if args.resume and args.init_model_pass != '-1':
    print('Resume training from checkpoint..')
    f_path_latest = os.path.join(args.model_dir, 'latest')
    f_path = os.path.join(args.model_dir,
                          'checkpoint-%s' % args.init_model_pass)
    if not os.path.isdir(args.model_dir):
        print('train from scratch: no checkpoint directory or file found')
    elif args.init_model_pass == 'latest' and os.path.isfile(f_path_latest):
        checkpoint = torch.load(f_path_latest)
        pretrained_dict = checkpoint['net']
        model_dict = net.state_dict()
        model_dict.update(pretrained_dict)
        net.load_state_dict(model_dict, strict=False)
        #optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch'] + 1
        print('resuming training from epoch %s in latest' % start_epoch)
    elif os.path.isfile(f_path):
        checkpoint = torch.load(f_path)
        net.load_state_dict(checkpoint['net'])
        #optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch'] + 1
        print('resuming training from epoch %s' % start_epoch)
    else:
        print('train from scratch: no checkpoint directory or file found')
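
# Soft cross-entropy handles the soft labels y_tilde produced by adv_interp.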
soft_xent_loss = softCrossEntropy()
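
# Run one epoch: generate adversarially interpolated batches, take an SGD
# step on the soft cross-entropy loss, and periodically log and checkpoint.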
def train_one_epoch(epoch, net):
    print('\nTraining for Epoch: %d' % epoch)
    net.train()

    # learning rate schedule
    if epoch < args.decay_epoch1:
        lr = args.lr
    elif epoch < args.decay_epoch2:
        lr = args.lr * args.decay_rate
    else:
        lr = args.lr * args.decay_rate * args.decay_rate
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    iterator = tqdm(trainloader, ncols=0, leave=False)
    for batch_idx, (inputs, targets) in enumerate(iterator):
        start_time = time.time()
        inputs, targets = inputs.to(device), targets.to(device)
        targets_onehot = one_hot_tensor(targets, args.num_classes, device)

        # Generate adversarially interpolated images x_tilde and the matching
        # adversarially perturbed soft labels y_tilde.
        x_tilde, y_tilde = adv_interp(inputs, targets_onehot, net,
                                      args.num_classes,
                                      config_adv_interp['epsilon'],
                                      config_adv_interp['label_adv_delta'],
                                      config_adv_interp['v_min'],
                                      config_adv_interp['v_max'])

        outputs = net(x_tilde, mode='logits')
        loss = soft_xent_loss(outputs, y_tilde)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss = loss.detach().item()
        duration = time.time() - start_time

        if batch_idx % args.log_step == 0:
            adv_acc = utils.get_acc(outputs, targets)
            # Accuracy on the clean inputs; the forward pass runs on a deep
            # copy of the net so it does not touch the live model's BatchNorm
            # running statistics.
            net_cp = copy.deepcopy(net)
            nat_outputs = net_cp(inputs, mode='logits')
            nat_acc = utils.get_acc(nat_outputs, targets)
            print(
                'Epoch %d, Step %d, lr %.4f, Duration %.2f, '
                'Training nat acc %.2f, Training adv acc %.2f, '
                'Training adv loss %.4f' %
                (epoch, batch_idx, lr, duration, 100 * nat_acc, 100 * adv_acc,
                 train_loss))
    # Save a numbered checkpoint periodically and during the final epochs.
    if epoch % args.save_epochs == 0 or epoch >= args.max_epoch - 2:
        print('Saving..')
        f_path = os.path.join(args.model_dir, 'checkpoint-%s' % epoch)
        state = {
            'net': net.state_dict(),
            'epoch': epoch,
            #'optimizer': optimizer.state_dict()
        }
        if not os.path.isdir(args.model_dir):
            os.makedirs(args.model_dir)
        torch.save(state, f_path)
    # Always keep a rolling 'latest' checkpoint for easy resumption.
    if epoch >= 1:
        print('Saving latest model for epoch %s..' % epoch)
        f_path = os.path.join(args.model_dir, 'latest')
        state = {
            'net': net.state_dict(),
            'epoch': epoch,
            #'optimizer': optimizer.state_dict()
        }
        if not os.path.isdir(args.model_dir):
            os.makedirs(args.model_dir)
        torch.save(state, f_path)
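
# Main training loop.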
for epoch in range(start_epoch, args.max_epoch + 1):
    train_one_epoch(epoch, net)