__author__ = 'max'

import time
import sys
import argparse

from lasagne_nlp.utils import utils
from lasagne_nlp.utils.regularization import dima
import lasagne_nlp.utils.data_processor as data_processor
import theano.tensor as T
import theano
import lasagne
from lasagne_nlp.networks.networks import build_BiRNN
import lasagne.nonlinearities as nonlinearities


def main():
    parser = argparse.ArgumentParser(description='Tuning with bi-directional RNN')
    parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
    parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna'], help='Embedding for words',
                        required=True)
    parser.add_argument('--embedding_dict', default='data/word2vec/GoogleNews-vectors-negative300.bin',
                        help='path for embedding dict')
    parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
    parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in RNN')
    parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
    parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
    parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
    parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
    parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
    parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov'], help='update algorithm', default='sgd')
    parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training',
                        required=True)
    parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
    parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
    parser.add_argument('--train')  # "data/POS-penn/wsj/split1/wsj1.train.original"
    parser.add_argument('--dev')  # "data/POS-penn/wsj/split1/wsj1.dev.original"
    parser.add_argument('--test')  # "data/POS-penn/wsj/split1/wsj1.test.original"

    args = parser.parse_args()
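
    # Note: this nested helper closes over fine_tune, input_var, max_length,
    # alphabet_size, embedd_dim and embedd_table, which are assigned further
    # down in main(), so it must only be called after those are set.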
    def construct_input_layer():
        if fine_tune:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length), input_var=input_var, name='input')
            layer_embedding = lasagne.layers.EmbeddingLayer(layer_input, input_size=alphabet_size,
                                                            output_size=embedd_dim, W=embedd_table,
                                                            name='embedding')
            return layer_embedding
        else:
            layer_input = lasagne.layers.InputLayer(shape=(None, max_length, embedd_dim), input_var=input_var,
                                                    name='input')
            return layer_input
    logger = utils.get_logger("BiRNN")
    fine_tune = args.fine_tune
    oov = args.oov
    regular = args.regular
    embedding = args.embedding
    embedding_path = args.embedding_dict
    train_path = args.train
    dev_path = args.dev
    test_path = args.test
    update_algo = args.update
    grad_clipping = args.grad_clipping
    gamma = args.gamma
    output_predict = args.output_prediction
    dropout = args.dropout
    X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
        embedd_table, label_alphabet, _, _, _, _ = data_processor.load_dataset_sequence_labeling(
            train_path, dev_path, test_path, oov=oov, fine_tune=fine_tune,
            embedding=embedding, embedding_path=embedding_path)
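    # The X_* arrays hold padded sequences (word indices when fine-tuning,
    # otherwise pre-computed embedding vectors), and the mask_* arrays are
    # 0/1 matrices marking real tokens versus padding.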

    num_labels = label_alphabet.size() - 1

    logger.info("constructing network...")

    # create variables
    target_var = T.imatrix(name='targets')
    mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
    if fine_tune:
        input_var = T.imatrix(name='inputs')
        num_data, max_length = X_train.shape
        alphabet_size, embedd_dim = embedd_table.shape
    else:
        input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
        num_data, max_length, embedd_dim = X_train.shape
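    # num_data and max_length come from the padded training tensor and are
    # reused below to size the input layers and the progress logging.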

    # construct input and mask layers
    layer_incoming = construct_input_layer()
    layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')

    # construct bi-rnn
    num_units = args.num_units
    bi_rnn = build_BiRNN(layer_incoming, num_units, mask=layer_mask, grad_clipping=grad_clipping,
                         dropout=dropout)

    # reshape bi-rnn output to [batch * max_length, num_units]
    bi_rnn = lasagne.layers.reshape(bi_rnn, (-1, [2]))
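    # In Lasagne's ReshapeLayer spec, -1 collapses the batch and time axes into
    # one, while [2] keeps the size of input dimension 2 (the feature axis), so
    # the dense softmax below scores every token independently.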

    # construct output layer (dense layer with softmax)
    layer_output = lasagne.layers.DenseLayer(bi_rnn, num_units=num_labels, nonlinearity=nonlinearities.softmax,
                                             name='softmax')

    # get output of the network, shape=[batch * max_length, #label]
    prediction_train = lasagne.layers.get_output(layer_output)
    prediction_eval = lasagne.layers.get_output(layer_output, deterministic=True)
    final_prediction = T.argmax(prediction_eval, axis=1)
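    # deterministic=True disables dropout (and any other stochastic behaviour)
    # in the evaluation graph, so eval predictions are reproducible.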

    # flatten target_var to a vector
    target_var_flatten = target_var.flatten()
    # flatten mask_var to a vector
    mask_var_flatten = mask_var.flatten()

    # compute loss
    num_loss = mask_var_flatten.sum(dtype=theano.config.floatX)
    # for training, use the mean of the loss over the number of (unmasked) labels
    loss_train = lasagne.objectives.categorical_crossentropy(prediction_train, target_var_flatten)
    loss_train = (loss_train * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss
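    # Multiplying by the flattened mask zeroes the loss at padded positions, so
    # the average is taken over real tokens only (num_loss counts those tokens).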

    # l2 regularization?
    if regular == 'l2':
        l2_penalty = lasagne.regularization.regularize_network_params(layer_output, lasagne.regularization.l2)
        loss_train = loss_train + gamma * l2_penalty

    # dima regularization?
    # if regular == 'dima':
    #     params_regular = utils.get_all_params_by_name(layer_output, name=['forward.hidden_to_hidden.W',
    #                                                                       'backward.hidden_to_hidden.W'])
    #     dima_penalty = lasagne.regularization.apply_penalty(params_regular, dima)
    #     loss_train = loss_train + gamma * dima_penalty

    loss_eval = lasagne.objectives.categorical_crossentropy(prediction_eval, target_var_flatten)
    loss_eval = (loss_eval * mask_var_flatten).sum(dtype=theano.config.floatX) / num_loss

    # compute the number of correct labels
    corr_train = lasagne.objectives.categorical_accuracy(prediction_train, target_var_flatten)
    corr_train = (corr_train * mask_var_flatten).sum(dtype=theano.config.floatX)
    corr_eval = lasagne.objectives.categorical_accuracy(prediction_eval, target_var_flatten)
    corr_eval = (corr_eval * mask_var_flatten).sum(dtype=theano.config.floatX)
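    # categorical_accuracy yields a 0/1 indicator per token; masking and summing
    # turns it into a count of correctly labelled real tokens.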

    # Create update expressions for training.
    # Hyper-parameters to tune: learning rate, momentum, regularization.
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    decay_rate = args.decay_rate
    momentum = 0.9
    params = lasagne.layers.get_all_params(layer_output, trainable=True)
    updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
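    # utils.create_updates is a project helper; judging by the --update choices,
    # it selects the matching lasagne.updates rule (sgd / momentum / nesterov).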

    # Compile a function performing a training step on a mini-batch.
    train_fn = theano.function([input_var, target_var, mask_var], [loss_train, corr_train, num_loss],
                               updates=updates)
    # Compile a second function evaluating the loss and accuracy of the network.
    eval_fn = theano.function([input_var, target_var, mask_var], [loss_eval, corr_eval, num_loss, final_prediction])

    # Finally, launch the training loop.
    logger.info("Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s "
                "(#training data: %d, batch size: %d, clip: %.1f)..."
                % (update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune,
                   num_data, batch_size, grad_clipping))

    num_batches = num_data / batch_size
    num_epochs = 1000
    best_loss = 1e+12
    best_acc = 0.0
    best_epoch_loss = 0
    best_epoch_acc = 0
    best_loss_test_err = 0.
    best_loss_test_corr = 0.
    best_acc_test_err = 0.
    best_acc_test_corr = 0.
    stop_count = 0
    lr = learning_rate
    patience = 5
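
    # Early stopping: stop_count increments on each epoch where neither dev loss
    # nor dev accuracy improves on its best value so far; training halts once it
    # reaches `patience` consecutive epochs.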
    for epoch in range(1, num_epochs + 1):
        print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
        train_err = 0.0
        train_corr = 0.0
        train_total = 0
        start_time = time.time()
        num_back = 0
        train_batches = 0
        for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, batch_size=batch_size,
                                               shuffle=True):
            inputs, targets, masks, _ = batch
            err, corr, num = train_fn(inputs, targets, masks)
            train_err += err * num
            train_corr += corr
            train_total += num
            train_batches += 1
            time_ave = (time.time() - start_time) / train_batches
            time_left = (num_batches - train_batches) * time_ave

            # update the progress log in place
            sys.stdout.write("\b" * num_back)
            log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
                min(train_batches * batch_size, num_data), num_data,
                train_err / train_total, train_corr * 100 / train_total, time_left)
            sys.stdout.write(log_info)
            num_back = len(log_info)
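            # num_back records the length of the line just written so the next
            # iteration can erase it in place with that many backspaces.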

        # update the training log after each epoch
        sys.stdout.write("\b" * num_back)
        print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
            min(train_batches * batch_size, num_data), num_data,
            train_err / train_total, train_corr * 100 / train_total, time.time() - start_time)

        # evaluate performance on dev data
        dev_err = 0.0
        dev_corr = 0.0
        dev_total = 0
        for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, batch_size=batch_size):
            inputs, targets, masks, _ = batch
            err, corr, num, predictions = eval_fn(inputs, targets, masks)
            dev_err += err * num
            dev_corr += corr
            dev_total += num
            if output_predict:
                utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet)
        print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
            dev_err / dev_total, dev_corr, dev_total, dev_corr * 100 / dev_total)

        if best_loss < dev_err and best_acc > dev_corr / dev_total:
            stop_count += 1
        else:
            update_loss = False
            update_acc = False
            stop_count = 0
            if best_loss > dev_err:
                update_loss = True
                best_loss = dev_err
                best_epoch_loss = epoch
            if best_acc < dev_corr / dev_total:
                update_acc = True
                best_acc = dev_corr / dev_total
                best_epoch_acc = epoch

            # evaluate on test data when better performance is detected
            test_err = 0.0
            test_corr = 0.0
            test_total = 0
            for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, batch_size=batch_size):
                inputs, targets, masks, _ = batch
                err, corr, num, predictions = eval_fn(inputs, targets, masks)
                test_err += err * num
                test_corr += corr
                test_total += num
                if output_predict:
                    utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet)
            print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
                test_err / test_total, test_corr, test_total, test_corr * 100 / test_total)

            if update_loss:
                best_loss_test_err = test_err
                best_loss_test_corr = test_corr
            if update_acc:
                best_acc_test_err = test_err
                best_acc_test_corr = test_corr

        # stop when dev performance has not improved for `patience` consecutive epochs
        if stop_count == patience:
            break

        # re-compile the training function with the decayed learning rate
        lr = learning_rate / (1.0 + epoch * decay_rate)
        updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
        train_fn = theano.function([input_var, target_var, mask_var], [loss_train, corr_train, num_loss],
                                   updates=updates)
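        # Note: the learning rate is baked into the compiled update expressions
        # as a constant, so applying the decay schedule means rebuilding the
        # updates and recompiling train_fn every epoch (a Theano shared variable
        # would avoid the recompilation).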

    # print the best performance on test data
    logger.info("final best loss test performance (at epoch %d)" % best_epoch_loss)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_loss_test_err / test_total, best_loss_test_corr, test_total, best_loss_test_corr * 100 / test_total)
    logger.info("final best acc test performance (at epoch %d)" % best_epoch_acc)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        best_acc_test_err / test_total, best_acc_test_corr, test_total, best_acc_test_corr * 100 / test_total)


if __name__ == '__main__':
    main()