From 7e30ca05bb02606a5b9b0ef279923ae66dc1feef Mon Sep 17 00:00:00 2001
From: Yves-Laurent
Date: Sun, 10 Apr 2022 14:16:16 -0400
Subject: [PATCH] Making the PFS implementation more flexible and reproducible

---
 CHANGELOG.md                | 35 ++++++++++++++++++++++-
 Makefile                    |  2 +-
 docker/kxy/Dockerfile       |  2 +-
 kxy/__init__.py             |  2 +-
 kxy/misc/tf/__init__.py     |  2 ++
 kxy/misc/tf/config.py       | 27 ++++++++++++++++++
 kxy/misc/tf/generators.py   | 20 +++++++++----
 kxy/misc/tf/initializers.py | 25 +++++++++++++++++
 kxy/misc/tf/learners.py     | 56 ++++++++++++++++++++++++++++++-------
 kxy/misc/tf/models.py       | 36 +++++++++++++-----------
 kxy/pfs/pfs_selector.py     |  7 ++++-
 setup.py                    |  2 +-
 tests/test_pfs.py           |  6 +++-
 13 files changed, 182 insertions(+), 40 deletions(-)
 create mode 100644 kxy/misc/tf/config.py
 create mode 100644 kxy/misc/tf/initializers.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3034310..8f8aaf5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,38 @@
-## Changes
+
+# Change Log
+## v.1.4.7 Changes
+
+Changes related to optimizing Principal Feature Selection.
+
+* Made it easy to change PFS' default learning parameters.
+* Changed PFS' default learning parameters (the learning rate is now 0.005 and Adam's epsilon is now 1e-04).
+* Added a seed parameter to PFS' `fit` method for reproducibility.
+
+To globally change the learning rate to 0.003, Adam's epsilon to 1e-5, and the number of epochs to 25, do:
+
+```Python
+from kxy.misc.tf import set_default_parameter
+set_default_parameter('lr', 0.003)
+set_default_parameter('epsilon', 1e-5)
+set_default_parameter('epochs', 25)
+```
+
+To change the number of epochs for a single iteration of PFS, use the `epochs` argument of the `fit` method of your `PFS` object. The `fit` method now also has a `seed` parameter you may use to make the PFS implementation deterministic.
+
+Example:
+```Python
+from kxy.pfs import PFS
+selector = PFS()
+selector.fit(x, y, epochs=25, seed=123)
+```
+
+Alternatively, you may use the `kxy.misc.tf.set_seed` function to make PFS deterministic.
+
+
+## v.1.4.6 Changes
+
+Minor PFS improvements.
 
 * Adding more (robust) mutual information loss functions.
 * Exposing the learned total mutual information between principal features and target as an attribute of PFS.
diff --git a/Makefile b/Makefile
index 9950eb4..317b2b1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-VERSION = 1.4.6
+VERSION = 1.4.7
 
 # Update the s3 bucket of the docs website
 deploy_docs:
diff --git a/docker/kxy/Dockerfile b/docker/kxy/Dockerfile
index 8adb3e3..af05061 100644
--- a/docker/kxy/Dockerfile
+++ b/docker/kxy/Dockerfile
@@ -22,7 +22,7 @@ RUN pip install boto3
 RUN pip install tqdm
 
 # Install kxy
-RUN pip install kxy==1.4.6
+RUN pip install kxy==1.4.7
 
 # Copy examples into the Notebooks folder
 RUN git clone https://github.com/kxytechnologies/kxy-python.git /opt/kxy-python
diff --git a/kxy/__init__.py b/kxy/__init__.py
index f52e42e..9813931 100644
--- a/kxy/__init__.py
+++ b/kxy/__init__.py
@@ -19,7 +19,7 @@ You should have received a copy of the GNU Affero General Public License
 along with this program. If not, see .
""" -__version__ = "1.4.6" +__version__ = "1.4.7" from kxy.api import * from kxy.pre_learning import * diff --git a/kxy/misc/tf/__init__.py b/kxy/misc/tf/__init__.py index 0662c8e..da1fe7b 100644 --- a/kxy/misc/tf/__init__.py +++ b/kxy/misc/tf/__init__.py @@ -27,6 +27,8 @@ from .generators import * from .ops import * +from .config import * +from .initializers import * from .layers import * from .losses import * from .models import * diff --git a/kxy/misc/tf/config.py b/kxy/misc/tf/config.py new file mode 100644 index 0000000..1afa843 --- /dev/null +++ b/kxy/misc/tf/config.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Global default training configs +""" +# LEARNING PARAMETERS +LR = 0.005 +EPOCHS = 20 + +# ADAM PARAMETERS +BETA_1 = 0.9 +BETA_2 = 0.999 +EPSILON = 1e-04 +AMSGRAD = False +BATCH_SIZE = 500 + + +def set_default_parameter(name, value): + ''' + Utility function to change parameters above at runtime. + ''' + import logging + globals()[name.upper()] = value + return + +def get_default_parameter(name): + return eval(name.upper()) \ No newline at end of file diff --git a/kxy/misc/tf/generators.py b/kxy/misc/tf/generators.py index 6dd7de9..653f82c 100644 --- a/kxy/misc/tf/generators.py +++ b/kxy/misc/tf/generators.py @@ -11,6 +11,12 @@ tf.config.set_soft_device_placement(True) from tensorflow.keras.utils import Sequence +LOCAL_SEED = None + +def set_generators_seed(seed): + globals()['LOCAL_SEED'] = seed + + rankdata = lambda x: 1.+np.argsort(np.argsort(x, axis=0), axis=0) class CopulaBatchGenerator(Sequence): ''' @@ -24,17 +30,18 @@ def __init__(self, z, batch_size=1000, steps_per_epoch=100): self.steps_per_epoch = steps_per_epoch self.emp_u = rankdata(self.z)/(self.n + 1.) self.emp_u[np.isnan(self.z)] = 0.5 + self.rnd_gen = np.random.default_rng(LOCAL_SEED) if self.n < 200*self.d: dn = 200*self.d - self.n - selected_rows = np.random.choice(self.n, dn, replace=True) + selected_rows = self.rnd_gen.choice(self.n, dn, replace=True) emp_u = self.emp_u[selected_rows, :].copy() scale = 1./(100.*self.n) - emp_u += (scale*np.random.rand(*emp_u.shape) - 0.5*scale) + emp_u += (scale*self.rnd_gen.uniform(size=emp_u.shape) - 0.5*scale) self.emp_u = np.concatenate([self.emp_u, emp_u], axis=0) self.n = self.emp_u.shape[0] - self.batch_selector = np.random.choice(self.n, self.batch_size*self.steps_per_epoch, replace=True) + self.batch_selector = self.rnd_gen.choice(self.n, self.batch_size*self.steps_per_epoch, replace=True) self.batch_selector = self.batch_selector.reshape((self.steps_per_epoch, self.batch_size)) @@ -44,7 +51,7 @@ def getitem_ndarray(self, idx): selected_rows = self.batch_selector[i] emp_u_ = self.emp_u[selected_rows, :] z_p = emp_u_.copy() - z_q = np.random.rand(*emp_u_.shape) + z_q = self.rnd_gen.uniform(size=emp_u_.shape) z = np.empty((self.batch_size, self.d, 2)) z[:, :, 0] = z_p @@ -70,6 +77,7 @@ class PFSBatchGenerator(Sequence): Random batch generator. 
''' def __init__(self, x, y, ox=None, oy=None, batch_size=1000, steps_per_epoch=100, n_shuffle=5): + self.rnd_gen = np.random.default_rng(LOCAL_SEED) assert x.shape[0] == y.shape[0] self.batch_size = batch_size self.n_shuffle = n_shuffle @@ -89,7 +97,7 @@ def __init__(self, x, y, ox=None, oy=None, batch_size=1000, steps_per_epoch=100, self.steps_per_epoch = steps_per_epoch replace = False if self.n > self.batch_size*self.steps_per_epoch else True - self.batch_selector = np.random.choice(self.n, self.batch_size*self.steps_per_epoch, replace=replace) + self.batch_selector = self.rnd_gen.choice(self.n, self.batch_size*self.steps_per_epoch, replace=replace) self.batch_selector = self.batch_selector.reshape((self.steps_per_epoch, self.batch_size)) @@ -110,7 +118,7 @@ def getitem_ndarray(self, idx): z_p = z_.copy() if z_p is None else np.concatenate([z_p, z_.copy()], axis=0) y_q = y_.copy() randomize = np.arange(y_q.shape[0]) - np.random.shuffle(randomize) + self.rnd_gen.shuffle(randomize) y_q = y_q[randomize] if not self.ox is None: oy_q = oy_.copy() diff --git a/kxy/misc/tf/initializers.py b/kxy/misc/tf/initializers.py new file mode 100644 index 0000000..9fedd4a --- /dev/null +++ b/kxy/misc/tf/initializers.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Custom Tensorflow initializers. +""" +import logging + +from tensorflow.keras.initializers import GlorotUniform + +LOCAL_SEED = None +INITIALIZER_COUNT = 0 + +def frozen_glorot_uniform(): + ''' + Deterministic GlorotUniform initializer. + ''' + if LOCAL_SEED is not None: + initializer = GlorotUniform(LOCAL_SEED+INITIALIZER_COUNT) + globals()['INITIALIZER_COUNT'] = INITIALIZER_COUNT + 1 + return initializer + else: + return GlorotUniform() + +def set_initializers_seed(seed): + globals()['LOCAL_SEED'] = seed \ No newline at end of file diff --git a/kxy/misc/tf/learners.py b/kxy/misc/tf/learners.py index fd70578..1d529c3 100644 --- a/kxy/misc/tf/learners.py +++ b/kxy/misc/tf/learners.py @@ -4,24 +4,39 @@ Tensorflow learners. """ import numpy as np +import logging import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN from tensorflow.keras.optimizers import Adam -from .generators import CopulaBatchGenerator, PFSBatchGenerator +from .generators import CopulaBatchGenerator, PFSBatchGenerator, set_generators_seed +from .initializers import set_initializers_seed from .models import CopulaModel, PFSModel, PFSOneShotModel from .losses import MINDLoss, ApproximateMINDLoss, RectifiedMINDLoss +from .config import get_default_parameter + +def set_seed(seed): + set_generators_seed(seed) + set_initializers_seed(seed) class CopulaLearner(object): ''' Maximum-entropy learner. 
''' - def __init__(self, d, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, \ - name='Adam', lr=0.01, subsets=[]): + def __init__(self, d, beta_1=None, beta_2=None, epsilon=None, amsgrad=None, \ + name='Adam', lr=None, subsets=[]): self.d = d self.model = CopulaModel(self.d, subsets=subsets) + beta_1 = get_default_parameter('beta_1') if beta_1 is None else beta_1 + beta_2 = get_default_parameter('beta_2') if beta_2 is None else beta_2 + lr = get_default_parameter('lr') if lr is None else lr + amsgrad = get_default_parameter('amsgrad') if amsgrad is None else amsgrad + epsilon = get_default_parameter('epsilon') if epsilon is None else epsilon + logging.info('Using the Adam optimizer with learning parameters: ' \ + 'lr: %.4f, beta_1: %.4f, beta_2: %.4f, epsilon: %.8f, amsgrad: %s' % \ + (lr, beta_1, beta_2, epsilon, amsgrad)) self.opt = Adam(beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, amsgrad=amsgrad, \ name=name, lr=lr) self.loss = MINDLoss() @@ -29,9 +44,10 @@ def __init__(self, d, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, \ self.copula_entropy = None - def fit(self, z, batch_size=10000, steps_per_epoch=1000, epochs=20): + def fit(self, z, batch_size=10000, steps_per_epoch=1000, epochs=None): ''' ''' z_gen = CopulaBatchGenerator(z, batch_size=batch_size, steps_per_epoch=steps_per_epoch) + epochs = get_default_parameter('epochs') if epochs is None else epochs self.model.fit(z_gen, epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch, \ callbacks=[EarlyStopping(patience=3, monitor='loss'), TerminateOnNaN()]) self.copula_entropy = self.model.evaluate(z_gen) @@ -43,14 +59,22 @@ class PFSLearner(object): ''' Principal Feature Learner. ''' - def __init__(self, dx, dy=1, dox=0, doy=0, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, \ - name='Adam', lr=0.01): + def __init__(self, dx, dy=1, dox=0, doy=0, beta_1=None, beta_2=None, epsilon=None, amsgrad=None, \ + lr=None, name='Adam'): x_ixs = [_ for _ in range(dx)] y_ixs = [dx+_ for _ in range(dy)] ox_ixs = [dx+dy+_ for _ in range(dox)] oy_ixs = [dx+dy+dox+_ for _ in range(doy)] self.model = PFSModel(x_ixs, y_ixs, ox_ixs=ox_ixs, oy_ixs=oy_ixs) + beta_1 = get_default_parameter('beta_1') if beta_1 is None else beta_1 + beta_2 = get_default_parameter('beta_2') if beta_2 is None else beta_2 + lr = get_default_parameter('lr') if lr is None else lr + amsgrad = get_default_parameter('amsgrad') if amsgrad is None else amsgrad + epsilon = get_default_parameter('epsilon') if epsilon is None else epsilon + logging.info('Using the Adam optimizer with learning parameters: ' \ + 'lr: %.4f, beta_1: %.4f, beta_2: %.4f, epsilon: %.8f, amsgrad: %s' % \ + (lr, beta_1, beta_2, epsilon, amsgrad)) self.opt = Adam(beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, amsgrad=amsgrad, \ name=name, lr=lr) self.loss = RectifiedMINDLoss() # MINDLoss() @@ -60,14 +84,16 @@ def __init__(self, dx, dy=1, dox=0, doy=0, beta_1=0.9, beta_2=0.999, epsilon=1e- self.statistics = None - def fit(self, x, y, ox=None, oy=None, batch_size=500, n_shuffle=5, epochs=20, mi_eps=0.00001): + def fit(self, x, y, ox=None, oy=None, batch_size=None, n_shuffle=5, epochs=None, mi_eps=0.00001): ''' ''' n = x.shape[0] + batch_size = get_default_parameter('batch_size') if batch_size is None else batch_size steps_per_epoch = n//batch_size steps_per_epoch = min(max(steps_per_epoch, 100), 1000) z_gen = PFSBatchGenerator(x, y, ox=ox, oy=oy, batch_size=batch_size, \ steps_per_epoch=steps_per_epoch, n_shuffle=n_shuffle) + epochs = get_default_parameter('epochs') if 
epochs is None else epochs self.model.fit(z_gen, epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch, \ callbacks=[EarlyStopping(patience=3, monitor='loss'), TerminateOnNaN()]) self.mutual_information = -self.model.evaluate(z_gen) @@ -102,12 +128,20 @@ class PFSOneShotLearner(object): ''' Principal Feature Learner learning multiple principal features simultaneously. ''' - def __init__(self, dx, dy=1, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, \ - name='Adam', lr=0.01, p=1): + def __init__(self, dx, dy=1, beta_1=None, beta_2=None, epsilon=None, amsgrad=None, \ + lr=None, name='Adam', p=1): x_ixs = [_ for _ in range(dx)] y_ixs = [dx+_ for _ in range(dy)] self.model = PFSOneShotModel(x_ixs, y_ixs, p=p) + beta_1 = get_default_parameter('beta_1') if beta_1 is None else beta_1 + beta_2 = get_default_parameter('beta_2') if beta_2 is None else beta_2 + lr = get_default_parameter('lr') if lr is None else lr + amsgrad = get_default_parameter('amsgrad') if amsgrad is None else amsgrad + epsilon = get_default_parameter('epsilon') if epsilon is None else epsilon + logging.info('Using the Adam optimizer with learning parameters: ' \ + 'lr: %.4f, beta_1: %.4f, beta_2: %.4f, epsilon: %.8f, amsgrad: %s' % \ + (lr, beta_1, beta_2, epsilon, amsgrad)) self.opt = Adam(beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, amsgrad=amsgrad, \ name=name, lr=lr) self.loss = RectifiedMINDLoss() # MINDLoss() @@ -117,14 +151,16 @@ def __init__(self, dx, dy=1, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=Fa self.statistics = None - def fit(self, x, y, batch_size=500, n_shuffle=5, epochs=20, mi_eps=0.00001): + def fit(self, x, y, batch_size=None, n_shuffle=5, epochs=None, mi_eps=0.00001): ''' ''' n = x.shape[0] + batch_size = get_default_parameter('batch_size') if batch_size is None else batch_size steps_per_epoch = n//batch_size steps_per_epoch = min(max(steps_per_epoch, 100), 1000) z_gen = PFSBatchGenerator(x, y, batch_size=batch_size, \ steps_per_epoch=steps_per_epoch, n_shuffle=n_shuffle) + epochs = get_default_parameter('epochs') if epochs is None else epochs self.model.fit(z_gen, epochs=epochs, batch_size=batch_size, steps_per_epoch=steps_per_epoch) self.mutual_information = -self.model.evaluate(z_gen) w = self.model.w_layer.get_weights()[0] diff --git a/kxy/misc/tf/models.py b/kxy/misc/tf/models.py index cfafce0..9b2f4a7 100644 --- a/kxy/misc/tf/models.py +++ b/kxy/misc/tf/models.py @@ -15,6 +15,7 @@ from tensorflow.keras.layers import Dense, Lambda, concatenate, Dot from tensorflow.keras.constraints import UnitNorm +from .initializers import frozen_glorot_uniform from .layers import InitializableDense @@ -33,9 +34,9 @@ def __init__(self, d, subsets=[]): self.p_samples = Lambda(lambda x: x[:,:,0]) self.q_samples = Lambda(lambda x: x[:,:,1]) - self.fx_non_mon_layer_1s = [Dense(3, activation=tf.nn.relu) for _ in range(self.n_subsets)] - self.fx_non_mon_layer_2s = [Dense(5, activation=tf.nn.relu) for _ in range(self.n_subsets)] - self.fx_non_mon_layer_3s = [Dense(3, activation=tf.nn.relu) for _ in range(self.n_subsets)] + self.fx_non_mon_layer_1s = [Dense(3, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) for _ in range(self.n_subsets)] + self.fx_non_mon_layer_2s = [Dense(5, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) for _ in range(self.n_subsets)] + self.fx_non_mon_layer_3s = [Dense(3, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) for _ in range(self.n_subsets)] self.fx_non_mon_layer_4s = [Dense(1) for _ in 
range(self.n_subsets)] eff_ds = [len(subset)+1 for subset in self.subsets] @@ -43,9 +44,10 @@ def __init__(self, d, subsets=[]): self.dots = [Dot(1) for _ in range(self.n_subsets)] # Mixing layers - self.mixing_layer1 = Dense(5, activation=tf.nn.relu) - self.mixing_layer2 = Dense(5, activation=tf.nn.relu) - self.mixing_layer3 = Dense(1) + self.mixing_layer1 = Dense(5, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) + self.mixing_layer2 = Dense(5, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) + self.mixing_layer3 = Dense(1, kernel_initializer=frozen_glorot_uniform()) + def subset_statistics(self, u, i): @@ -120,21 +122,21 @@ def __init__(self, x_ixs, y_ixs, ox_ixs=[], oy_ixs=[], p=1): # Feature direction self.w_constraint = UnitNorm(axis=0) - self.w_layer = Dense(p, use_bias=False, kernel_constraint=self.w_constraint) + self.w_layer = Dense(p, use_bias=False, kernel_constraint=self.w_constraint, kernel_initializer=frozen_glorot_uniform()) # Z network - n_outer_z = 3 # max(len(x_ixs)//10, 3) - n_inner_z = 5 # max(len(x_ixs)//10, 5) - self.z_layer_1 = Dense(n_outer_z, activation=tf.nn.relu) - self.z_layer_2 = Dense(n_inner_z, activation=tf.nn.relu) - self.z_layer_3 = Dense(n_outer_z, activation=tf.nn.relu) + n_outer_z = 3 + n_inner_z = 5 + self.z_layer_1 = Dense(n_outer_z, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) + self.z_layer_2 = Dense(n_inner_z, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) + self.z_layer_3 = Dense(n_outer_z, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) # Y network - n_outer_y = 3 # max(len(x_ixs)//100, 3) - n_inner_y = 5 # max(len(x_ixs)//100, 5) - self.y_layer_1 = Dense(n_outer_y, activation=tf.nn.relu) - self.y_layer_2 = Dense(n_inner_y, activation=tf.nn.relu) - self.y_layer_3 = Dense(n_outer_y, activation=tf.nn.relu) + n_outer_y = 3 + n_inner_y = 5 + self.y_layer_1 = Dense(n_outer_y, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) + self.y_layer_2 = Dense(n_inner_y, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) + self.y_layer_3 = Dense(n_outer_y, activation=tf.nn.relu, kernel_initializer=frozen_glorot_uniform()) # Outer quadratic constraints n_q = 1+n_outer_y+len(oy_ixs)+len(y_ixs)+n_outer_z+len(ox_ixs) diff --git a/kxy/pfs/pfs_selector.py b/kxy/pfs/pfs_selector.py index 5a59990..d78340b 100644 --- a/kxy/pfs/pfs_selector.py +++ b/kxy/pfs/pfs_selector.py @@ -4,6 +4,7 @@ import logging import numpy as np +import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping, TerminateOnNaN from tensorflow.keras.optimizers import Adam @@ -80,7 +81,7 @@ class PFS(object): """ Principal Feature Selection. """ - def fit(self, x, y, p=None, mi_tolerance=0.0001, max_duration=None, epochs=20): + def fit(self, x, y, p=None, mi_tolerance=0.0001, max_duration=None, epochs=20, seed=None): """ Perform Principal Feature Selection using :math:`x` to predict :math:`y`. @@ -114,6 +115,10 @@ def fit(self, x, y, p=None, mi_tolerance=0.0001, max_duration=None, epochs=20): W : np.array 2D array whose rows are directions to use to compute principal features: :math:`z = Wx`. 
""" + if not seed is None: + from kxy.misc.tf import set_seed + set_seed(seed) + if max_duration: start_time = time() diff --git a/setup.py b/setup.py index 3dbe77e..5af6d4a 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ with open('README.md') as f: long_description = f.read() -version = "1.4.6" +version = "1.4.7" setup(name="kxy", version=version, zip_safe=False, diff --git a/tests/test_pfs.py b/tests/test_pfs.py index ceef844..986aaa5 100644 --- a/tests/test_pfs.py +++ b/tests/test_pfs.py @@ -104,6 +104,8 @@ def test_save_pfs(): def test_pfs_accuracy(): # Generate the data + seed = 1 + np.random.seed(seed) d = 100 w = np.ones(d)/d x = np.random.randn(10000, d) @@ -111,8 +113,10 @@ def test_pfs_accuracy(): y = xTw + 2.*xTw**2 + 0.5*xTw**3 # Run PFS + from kxy.misc.tf import set_default_parameter + set_default_parameter('lr', 0.001) selector = PFS() - selector.fit(x, y, epochs=21) + selector.fit(x, y, epochs=21, seed=seed) # Learned principal directions F = selector.feature_directions