# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables # pylint: disable=unused-import
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
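  # If this sharded variable was already concatenated in the current scope,
  # return the cached tensor from the CONCATENATED_VARIABLES collection
  # instead of building a second concat op.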
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" % (shape,
num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(
vs.get_variable(
name + "_%d" % i, [current_size] + shape[1:], dtype=dtype))
return shards
def _norm(g, b, inp, scope):
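  """Layer-normalize `inp`; gamma and beta are created under `scope`,
  initialized to `g` and `b` respectively."""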
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(g)
beta_init = init_ops.constant_initializer(b)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init)
vs.get_variable("beta", shape=shape, initializer=beta_init)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The coupling of input and forget gate is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. "LSTM: A Search Space Odyssey"
  The class uses optional peep-hole connections and an optional projection
  layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
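
  A minimal usage sketch (the shapes and sizes below are illustrative
  assumptions, not requirements):

    inputs = tf.placeholder(tf.float32, [None, 20, 40])  # [batch, time, depth]
    cell = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(
        num_units=64, use_peepholes=True)
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)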
"""
def __init__(self,
num_units,
use_peepholes=False,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=1,
num_proj_shards=1,
forget_bias=1.0,
state_is_tuple=True,
activation=math_ops.tanh,
reuse=None,
layer_norm=False,
norm_gain=1.0,
norm_shift=0.0):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
      state_is_tuple: If True (the default), accepted and returned states are
        2-tuples of the `c_state` and `m_state`. If False, they are
        concatenated along the column axis; the concatenated form will soon
        be deprecated.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
"""
super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
self._reuse = reuse
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (
rnn_cell_impl.LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 3 * self._num_units], dtype,
self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[3 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([inputs, m_prev], 1)
lstm_matrix = math_ops.matmul(cell_inputs, concat_w)
    # If layer normalization is applied, do not add the bias; layer norm
    # supplies its own shift (beta).
if not self._layer_norm:
lstm_matrix = nn_ops.bias_add(lstm_matrix, b)
j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
# Apply layer normalization
if self._layer_norm:
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
else:
f_act = sigmoid(f + self._forget_bias)
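    # Coupled input and forget gates: the input gate is implicitly
    # i = 1 - f_act (Greff et al.), so no separate input-gate weights exist.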
c = (f_act * c_prev + (1 - f_act) * self._activation(j))
# Apply layer normalization
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable("W_P",
[self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (
rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
class TimeFreqLSTMCell(rnn_cell_impl.RNNCell):
"""Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
This implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
It uses peep-hole connections and optional cell clipping.
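
  A minimal construction sketch (sizes are illustrative assumptions): with an
  input depth of 40, feature_size=8 and frequency_skip=4 give
  (40 - 8) // 4 + 1 = 9 frequency steps per time step:

    cell = tf.contrib.rnn.TimeFreqLSTMCell(
        num_units=64, use_peepholes=True, feature_size=8, frequency_skip=4)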
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=1,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_unit_shards: int, How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
forget_bias: float, Biases of the forget gate are initialized by default
to 1 in order to reduce the scale of forgetting at the beginning
of the training.
feature_size: int, The size of the input feature the LSTM spans over.
frequency_skip: int, The amount the LSTM filter is shifted by in
frequency.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._state_size = 2 * num_units
self._output_size = num_units
self._reuse = reuse
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
freq_inputs = self._make_tf_features(inputs)
dtype = inputs.dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w = _get_concat_variable(
"W", [actual_input_size + 2 * self._num_units, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros(
        [inputs.shape[0].value or array_ops.shape(inputs)[0], self._num_units],
dtype)
for fq in range(len(freq_inputs)):
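      # The flat state interleaves per-frequency (c, m) pairs:
      # [c_0, m_0, c_1, m_1, ...], each block num_units wide.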
c_prev = array_ops.slice(state, [0, 2 * fq * self._num_units],
[-1, self._num_units])
m_prev = array_ops.slice(state, [0, (2 * fq + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq], 1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * tanh(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * tanh(c)
else:
m = sigmoid(o) * tanh(c)
m_prev_freq = m
if fq == 0:
state_out = array_ops.concat([c, m], 1)
m_out = m
else:
state_out = array_ops.concat([state_out, c, m], 1)
m_out = array_ops.concat([m_out, m], 1)
return m_out, state_out
def _make_tf_features(self, input_feat):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, batch x num_units.
Returns:
A list of frequency features, with each element containing:
- A 2D, batch x output_dim, Tensor representing the time-frequency feature
for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
num_feats = int(
(input_size - self._feature_size) / (self._frequency_skip)) + 1
freq_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(input_feat, [0, f * self._frequency_skip],
[-1, self._feature_size])
freq_inputs.append(cur_input)
return freq_inputs
class GridLSTMCell(rnn_cell_impl.RNNCell):
"""Grid Long short-term memory unit (LSTM) recurrent network cell.
The default is based on:
Nal Kalchbrenner, Ivo Danihelka and Alex Graves
"Grid Long Short-Term Memory," Proc. ICLR 2016.
http://arxiv.org/abs/1507.01526
When peephole connections are used, the implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
The code uses optional peephole connections, shared_weights and cell clipping.
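
  A minimal usage sketch (shapes and sizes are illustrative assumptions): for
  inputs whose feature dimension is 40, feature_size=8 with frequency_skip=4
  gives (40 - 8) // 4 + 1 = 9 frequency steps, hence num_frequency_blocks=[9]:

    inputs = tf.placeholder(tf.float32, [None, 20, 40])
    cell = tf.contrib.rnn.GridLSTMCell(
        num_units=64, feature_size=8, frequency_skip=4,
        num_frequency_blocks=[9])
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)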
"""
def __init__(self,
num_units,
use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
state_is_tuple=True,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
      num_frequency_blocks: [required] A list with the number of frequency
        blocks in each input-feature split defined by start_freqindex_list
        and end_freqindex_list; together the blocks must cover the whole
        input feature.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
      state_is_tuple: If True (the default), accepted and returned states are
        2-tuples of the `c_state` and `m_state`. If False, they are
        concatenated along the column axis; the concatenated form will soon
        be deprecated.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
ValueError: if the num_frequency_blocks list is not specified
"""
super(GridLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._share_time_frequency_weights = share_time_frequency_weights
self._couple_input_forget_gates = couple_input_forget_gates
self._state_is_tuple = state_is_tuple
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._start_freqindex_list = start_freqindex_list
self._end_freqindex_list = end_freqindex_list
self._num_frequency_blocks = num_frequency_blocks
self._total_blocks = 0
self._reuse = reuse
if self._num_frequency_blocks is None:
raise ValueError("Must specify num_frequency_blocks")
for block_index in range(len(self._num_frequency_blocks)):
self._total_blocks += int(self._num_frequency_blocks[block_index])
if state_is_tuple:
state_names = ""
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple("GridLSTMStateTuple",
state_names.strip(","))
self._state_size = self._state_tuple_type(*(
[num_units, num_units] * self._total_blocks))
else:
self._state_tuple_type = None
self._state_size = num_units * self._total_blocks * 2
self._output_size = num_units * self._total_blocks * 2
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
@property
def state_tuple_type(self):
return self._state_tuple_type
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, feature_size].
state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
flag self._state_is_tuple.
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
        Here output_dim is 2 * num_units * sum(num_frequency_blocks).
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
freq_inputs = self._make_tf_features(inputs)
m_out_lst = []
state_out_lst = []
for block in range(len(freq_inputs)):
m_out_lst_current, state_out_lst_current = self._compute(
freq_inputs[block],
block,
state,
batch_size,
state_is_tuple=self._state_is_tuple)
m_out_lst.extend(m_out_lst_current)
state_out_lst.extend(state_out_lst_current)
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
state_out = array_ops.concat(state_out_lst, 1)
m_out = array_ops.concat(m_out_lst, 1)
return m_out, state_out
def _compute(self,
freq_inputs,
block,
state,
batch_size,
state_prefix="state",
state_is_tuple=True):
"""Run the actual computation of one step LSTM.
Args:
freq_inputs: list of Tensors, 2D, [batch, feature_size].
block: int, current frequency block index to process.
state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
the flag state_is_tuple.
batch_size: int32, batch size.
state_prefix: (optional) string, name prefix for states, defaults to
"state".
state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
Returns:
A tuple, containing:
- A list of [batch, output_dim] Tensors, representing the output of the
LSTM given the inputs and state.
- A list of [batch, state_size] Tensors, representing the LSTM state
values given the inputs and previous state.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
num_gates = 3 if self._couple_input_forget_gates else 4
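    # Gate order in the fused matmul output: (i, j, o) when the input and
    # forget gates are coupled, (i, j, f, o) otherwise.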
dtype = freq_inputs[0].dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w_f = _get_concat_variable(
"W_f_%d" % block,
[actual_input_size + 2 * self._num_units, num_gates * self._num_units],
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable("W_t_%d" % block, [
actual_input_size + 2 * self._num_units, num_gates * self._num_units
], dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if self._use_peepholes:
# Diagonal connections
if not self._couple_input_forget_gates:
w_f_diag_freqf = vs.get_variable(
"W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_freqt = vs.get_variable(
"W_F_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqf = vs.get_variable(
"W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqt = vs.get_variable(
"W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqf = vs.get_variable(
"W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqt = vs.get_variable(
"W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
if not self._share_time_frequency_weights:
if not self._couple_input_forget_gates:
w_f_diag_timef = vs.get_variable(
"W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_timet = vs.get_variable(
"W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timef = vs.get_variable(
"W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timet = vs.get_variable(
"W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timef = vs.get_variable(
"W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timet = vs.get_variable(
"W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
for freq_index in range(len(freq_inputs)):
if state_is_tuple:
name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
c_prev_time = getattr(state, name_prefix + "_c")
m_prev_time = getattr(state, name_prefix + "_m")
else:
c_prev_time = array_ops.slice(
state, [0, 2 * freq_index * self._num_units], [-1, self._num_units])
m_prev_time = array_ops.slice(
state, [0, (2 * freq_index + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
# F-LSTM
lstm_matrix_freq = nn_ops.bias_add(
math_ops.matmul(cell_inputs, concat_w_f), b_f)
if self._couple_input_forget_gates:
i_freq, j_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
f_freq = None
else:
i_freq, j_freq, f_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
# T-LSTM
if self._share_time_frequency_weights:
i_time = i_freq
j_time = j_freq
f_time = f_freq
o_time = o_freq
else:
lstm_matrix_time = nn_ops.bias_add(
math_ops.matmul(cell_inputs, concat_w_t), b_t)
if self._couple_input_forget_gates:
i_time, j_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
f_time = None
else:
i_time, j_time, f_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
# F-LSTM c_freq
# input gate activations
if self._use_peepholes:
i_freq_g = sigmoid(i_freq + w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_freq_g = sigmoid(i_freq)
# forget gate activations
if self._couple_input_forget_gates:
f_freq_g = 1.0 - i_freq_g
else:
if self._use_peepholes:
f_freq_g = sigmoid(f_freq + self._forget_bias + w_f_diag_freqf *
c_prev_freq + w_f_diag_freqt * c_prev_time)
else:
f_freq_g = sigmoid(f_freq + self._forget_bias)
# cell state
c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# T-LSTM c_freq
# input gate activations
if self._use_peepholes:
if self._share_time_frequency_weights:
i_time_g = sigmoid(i_time + w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_time_g = sigmoid(i_time + w_i_diag_timef * c_prev_freq +
w_i_diag_timet * c_prev_time)
else:
i_time_g = sigmoid(i_time)
# forget gate activations
if self._couple_input_forget_gates:
f_time_g = 1.0 - i_time_g
else:
if self._use_peepholes:
if self._share_time_frequency_weights:
f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_freqf *
c_prev_freq + w_f_diag_freqt * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias + w_f_diag_timef *
c_prev_freq + w_f_diag_timet * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias)
# cell state
c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# F-LSTM m_freq
if self._use_peepholes:
m_freq = sigmoid(o_freq + w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_freq)
else:
m_freq = sigmoid(o_freq) * tanh(c_freq)
# T-LSTM m_time
if self._use_peepholes:
if self._share_time_frequency_weights:
m_time = sigmoid(o_time + w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time + w_o_diag_timef * c_freq +
w_o_diag_timet * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time) * tanh(c_time)
m_prev_freq = m_freq
c_prev_freq = c_freq
# Concatenate the outputs for T-LSTM and F-LSTM for each shift
if freq_index == 0:
state_out_lst = [c_time, m_time]
m_out_lst = [m_time, m_freq]
else:
state_out_lst.extend([c_time, m_time])
m_out_lst.extend([m_time, m_freq])
return m_out_lst, state_out_lst
def _make_tf_features(self, input_feat, slice_offset=0):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, [batch, num_units].
slice_offset: (optional) Python int, default 0, the slicing offset is only
used for the backward processing in the BidirectionalGridLSTMCell. It
specifies a different starting point instead of always 0 to enable the
forward and backward processing look at different frequency blocks.
Returns:
A list of frequency features, with each element containing:
- A 2D, [batch, output_dim], Tensor representing the time-frequency
feature for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
if slice_offset > 0:
# Padding to the end
inputs = array_ops.pad(input_feat,
array_ops.constant(
[0, 0, 0, slice_offset],
shape=[2, 2],
dtype=dtypes.int32), "CONSTANT")
elif slice_offset < 0:
# Padding to the front
inputs = array_ops.pad(input_feat,
array_ops.constant(
[0, 0, -slice_offset, 0],
shape=[2, 2],
dtype=dtypes.int32), "CONSTANT")
slice_offset = 0
else:
inputs = input_feat
freq_inputs = []
if not self._start_freqindex_list:
if len(self._num_frequency_blocks) != 1:
raise ValueError("Length of num_frequency_blocks"
" is not 1, but instead is %d",
len(self._num_frequency_blocks))
num_feats = int(
(input_size - self._feature_size) / (self._frequency_skip)) + 1
if num_feats != self._num_frequency_blocks[0]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." %
(self._num_frequency_blocks[0], num_feats))
block_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(
inputs, [0, slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
else:
if len(self._start_freqindex_list) != len(self._end_freqindex_list):
raise ValueError("Length of start and end freqindex_list"
" does not match %d %d",
len(self._start_freqindex_list),
len(self._end_freqindex_list))
if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
raise ValueError("Length of num_frequency_blocks"
" is not equal to start_freqindex_list %d %d",
len(self._num_frequency_blocks),
len(self._start_freqindex_list))
for b in range(len(self._start_freqindex_list)):
start_index = self._start_freqindex_list[b]
end_index = self._end_freqindex_list[b]
cur_size = end_index - start_index
block_feats = int(
(cur_size - self._feature_size) / (self._frequency_skip)) + 1
if block_feats != self._num_frequency_blocks[b]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." %
(self._num_frequency_blocks[b], block_feats))
block_inputs = []
for f in range(block_feats):
cur_input = array_ops.slice(
inputs,
[0, start_index + slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
return freq_inputs
class BidirectionalGridLSTMCell(GridLSTMCell):
"""Bidirectional GridLstm cell.
  The bidirectional connection is only used in the frequency direction, and
  hence does not affect the time direction's real-time processing that is
  required for online recognition systems.
The current implementation uses different weights for the two directions.
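
  A minimal construction sketch (sizes are illustrative assumptions;
  backward_slice_offset shifts the frequency windows seen by the backward
  direction):

    cell = tf.contrib.rnn.BidirectionalGridLSTMCell(
        num_units=64, feature_size=8, frequency_skip=4,
        num_frequency_blocks=[9], backward_slice_offset=1)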
"""
def __init__(self,
num_units,
use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None,
initializer=None,
num_unit_shards=1,
forget_bias=1.0,
feature_size=None,
frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
backward_slice_offset=0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning