import torch
import config as CONFIG
import argparse, os
import numpy as np
from tqdm import tqdm

CUDA_AVAIL = torch.cuda.is_available()

# Parse arguments
parser = argparse.ArgumentParser(description='Training a language model on the Penn Treebank dataset')
parser.add_argument('--seed', default=12, type=int, required=False, help='(default=%(default)d)')
parser.add_argument('--data_dir', default='data/', type=str, required=False, help='(default=%(default)s)')
parser.add_argument('--model_size', default='medium', type=str, required=False, help='(default=%(default)s)')
parser.add_argument('--batch_size', default=20, type=int, required=False, help='(default=%(default)d)')
parser.add_argument('--seq_len', default=35, type=int, required=False, help='(default=%(default)d)')
# store_false: pretrained defaults to True, and passing --pretrained disables loading the pretrained LM
parser.add_argument('--pretrained', action='store_false', required=False, help='(default=%(default)s)')
args = parser.parse_args()

if args.model_size == 'medium': model_config = CONFIG.MediumConfig
elif args.model_size == 'large': model_config = CONFIG.LargeConfig
elif args.model_size == 'small': model_config = CONFIG.SmallConfig
else: model_config = CONFIG.TestConfig

print('*' * 100, '\n', args, '\n', '*' * 100)
print('CUDA: ', CUDA_AVAIL)

# Set random seeds for reproducibility
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if CUDA_AVAIL: torch.cuda.manual_seed(args.seed)
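
# Model and dataloader imports are placed after seeding, presumably so that
# any import-time random initialization is also reproducible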
from models.seq2seq import Seq2Seq, RnnLMEncoder, RnnLMDecoder
from models.attention import Attention
from dataloaders.ptb_loader import *
def create_ptb_loader(data_dir, batch_size, seq_len):
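    """Load the raw PTB splits, batch them with masking, and return the three loaders plus the vocabulary size."""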
    train_data, valid_data, test_data, vocabulary_size = ptb_raw_data(data_dir)
    train_x, train_y = ptb_batcher(train_data, seq_len, mask=True)
    train_loader = ptb_loader(train_x, train_y, batch_size)
    test_x, test_y = ptb_batcher(test_data, seq_len, mask=True)
    test_loader = ptb_loader(test_x, test_y, batch_size)
    valid_x, valid_y = ptb_batcher(valid_data, seq_len, mask=True)
    valid_loader = ptb_loader(valid_x, valid_y, batch_size)
    return train_loader, test_loader, valid_loader, vocabulary_size

def save_checkpoint(state, folder='unnamed', is_best=False, filename='checkpoint.pth.tar'):
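    """Save a checkpoint under experiments/maskmle/<folder>/, using model_best.pth.tar when is_best is set."""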
    # Create the checkpoint directory; it must include the 'maskmle'
    # subfolder used in the save paths below
    mkdirs(os.path.join('experiments', 'maskmle', folder))
    if is_best:
        save_path = os.path.join('experiments', 'maskmle', folder, 'model_best.pth.tar')
        print('saving best performing checkpoint..')
    else:
        save_path = os.path.join('experiments', 'maskmle', folder, filename)
    torch.save(state, save_path)

def mkdirs(paths):
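    """Create a directory, or each directory in a list of paths."""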
    if isinstance(paths, list):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)

def mkdir(path):
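    """Create a directory if it does not already exist."""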
    if not os.path.exists(path):
        os.makedirs(path)

def main():
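    """Train the model on PTB, checkpoint on validation loss, and report test perplexity each epoch."""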
    mkdirs(os.path.join('experiments', 'maskmle', args.model_size))
    train_loader, test_loader, valid_loader, vocabulary_size = create_ptb_loader(args.data_dir, args.batch_size, args.seq_len)
    # Instantiate and init the model, and move it to the GPU
    model = Seq2Seq(vocabulary_size, model_config)
    if args.pretrained:
        model.load_pretrained_weights(os.path.join('experiments', 'lm', args.model_size, 'model_best.pth.tar'))
    else:
        print('NO PRETRAINED LANGUAGE MODEL!!')
        return
    if CUDA_AVAIL:
        model = model.cuda()
    criterion = torch.nn.NLLLoss()  # expects log-probabilities from the model's output layer
    # Define optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=model_config.learning_rate)
    lr = model_config.learning_rate
    best_val_loss = np.inf
    for e in tqdm(range(model_config.max_max_epoch), desc='Epoch'):
        model = train(train_loader, model, criterion, optimizer)
        val_loss = evaluate(valid_loader, model, criterion)
        state = {
            'arch': 'RnnLM',
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            save_checkpoint(state, folder=args.model_size, is_best=True)
        else:
            # Anneal the learning rate when validation loss stops improving
            # (assumes model_config.lr_decay > 1, since we divide by it)
            lr /= model_config.lr_decay
            optimizer = torch.optim.SGD(model.parameters(), lr=lr)
            save_checkpoint(state, folder=args.model_size)
        # Test
        test_loss = evaluate(test_loader, model, criterion)
        # Report
        msg = 'Epoch %d: \tValid loss=%.4f \tTest loss=%.4f \tTest perplexity=%.1f' % (e + 1, val_loss, test_loss, np.exp(test_loss))
        tqdm.write(msg)

def train(dataloader, model, criterion, optimizer):
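    """Run one full pass over the training data and return the updated model."""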
    # Set model to training mode (enables dropout)
    model.train()
    # Get initial hidden and memory states
    states = model.get_initial_states()
    if CUDA_AVAIL: states = states.cuda()
    # Loop over training batches
    for i, batch in enumerate(dataloader):
        x, y = batch
        if CUDA_AVAIL:
            x = x.cuda()
            y = y.cuda()
        # Truncated backpropagation: detach the states, otherwise the model
        # would try to backprop all the way to the start of the data set
        states = model.detach(states)
        # Forward pass
        logits, states = model.forward(x.long(), states)
        loss = criterion(logits.view(-1, model_config.vocab_size).float(), y.long().view(-1))
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), model_config.max_grad_norm)
        optimizer.step()
    return model

def evaluate(dataloader, model, criterion):
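    """Return the average per-token loss of the model over the given dataloader."""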
    # Set model to evaluation mode (disables dropout)
    model.eval()
    # Get initial hidden and memory states
    states = model.get_initial_states()
    if CUDA_AVAIL: states = states.cuda()
    # Loop over validation/test batches
    total_loss = 0
    num_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(dataloader):
            x, y = batch
            if CUDA_AVAIL:
                x = x.cuda()
                y = y.cuda()
            # Carry the hidden state across batches without retaining the graph
            states = model.detach(states)
            # Forward pass
            logits, states = model.forward(x.long(), states)
            loss = criterion(logits.view(-1, model_config.vocab_size).float(), y.long().view(-1))
            # NLLLoss averages over tokens by default, so scale the batch loss
            # back up by its number of target tokens before accumulating
            total_loss += loss.item() * np.prod(y.size())
            num_loss += np.prod(y.size())
    # Average per-token loss, so np.exp() of it gives perplexity
    return float(total_loss) / float(num_loss)

if __name__ == '__main__':
    # Quick smoke test kept for reference:
    # a = torch.rand((20, 35)).long()
    # h = (torch.zeros(2, 20, 650),
    #      torch.zeros(2, 20, 650))
    # b = Seq2Seq(10000, model_config)
    # y = b(a, h)
    # print(y.shape)
    # mask = generate_binary_mask(a)
    main()