
Merged PR 9248: Use safe_load for PyYAML
Introduced a model class look-up table that removes the need to specify the desired model class in the YAML file. Referencing classes in YAML files requires unsafe loading, which is undesirable from a security standpoint.

Some refactoring along the way, so the code now runs without appending "src" to the PYTHONPATH.
Neil Dalchau committed Sep 17, 2021
1 parent d3426bb commit 500994d
Showing 33 changed files with 296 additions and 313 deletions.
4 changes: 2 additions & 2 deletions README.md
@@ -33,14 +33,14 @@ To install the python dependencies, you can use `pip` with the requirements.txt

In Linux:
```bash
export PYTHONPATH=.:src
export PYTHONPATH=.
export INFERENCE_DATA_DIR=data
export INFERENCE_RESULTS_DIR=results
```

In Windows:
```dos
set PYTHONPATH=.;src
set PYTHONPATH=.
set INFERENCE_DATA_DIR=data
set INFERENCE_RESULTS_DIR=results
```
14 changes: 14 additions & 0 deletions models/__init__.py
@@ -0,0 +1,14 @@
# This module defines the set of supported models.
# The YAML loader uses this look-up table to resolve each model class from a string.

from models import debug, dr_constant, dr_growthrate, dr_blackbox

LOOKUP = {
'debug': debug.Debug_Constant,
'dr_constant': dr_constant.DR_Constant,
'dr_constant_precisions': dr_constant.DR_Constant_Precisions,
'dr_growthrate': dr_growthrate.DR_Growth,
'dr_blackbox': dr_blackbox.DR_Blackbox,
'dr_blackbox_precisions': dr_blackbox.DR_BlackboxPrecisions,
'dr_hierarchical_blackbox': dr_blackbox.DR_HierarchicalBlackbox
}
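
With the look-up table in place, a spec file can be parsed with `yaml.safe_load` and the `model` string resolved to a class at construction time. A minimal sketch of the idea follows; `load_model` is a hypothetical helper for illustration, not part of this commit:

```python
import yaml
import models

def load_model(spec_path, procdata):
    # safe_load builds only plain Python objects (dicts, lists, scalars),
    # so the YAML can no longer instantiate arbitrary classes.
    with open(spec_path) as f:
        spec = yaml.safe_load(f)
    params = spec['params']
    # Resolve the model string via the look-up table, e.g. 'debug' -> Debug_Constant.
    model_class = models.LOOKUP[params['model']]
    return model_class(params, procdata)
```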
31 changes: 12 additions & 19 deletions models/base_model.py
@@ -1,16 +1,13 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under a Microsoft Research License.

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.constraints import NonNeg
import numpy as np
import pdb
import tensorflow.compat.v1 as tf # type: ignore
import yaml

from solvers import modified_euler_integrate, integrate_while
from utils import default_get_value, variable_summaries
from procdata import ProcData
from src.solvers import modified_euler_integrate, integrate_while
from src.utils import default_get_value, variable_summaries
from src.procdata import ProcData

def power(x, a):
return tf.exp(a * tf.math.log(x))
@@ -35,13 +32,9 @@ def expand_constant_precisions(precision_list):
def expand_decayed_precisions(precision_list): # pylint: disable=unused-argument
raise NotImplementedError("TODO: expand_decayed_precisions")

class BaseModel(object):
# We need an init_with_params method separate from the usual __init__, because the latter is
# called automatically with no arguments by pyyaml on creation, and we need a way to feed
# params (from elsewhere in the YAML structure) into it. It would really be better construct
# it properly after the structure has been loaded.
class BaseModel:
# pylint: disable=attribute-defined-outside-init
def init_with_params(self, params, procdata : ProcData):
def __init__(self, params, procdata : ProcData):
self.params = params
self.relevance = procdata.relevance_vectors
self.default_devices = procdata.default_devices
@@ -150,11 +143,11 @@ def __init__(self, nspecies, n_hidden_precisions, inputs = None, hidden_activati
self.nspecies = nspecies
if inputs is None:
inputs = self.nspecies+1
inp = Dense(n_hidden_precisions, activation = hidden_activation, use_bias=True, name = "prec_hidden", input_shape=(inputs,))
act_layer = Dense(4, activation = tf.nn.sigmoid, name = "prec_act", bias_constraint = NonNeg())
deg_layer = Dense(4, activation = tf.nn.sigmoid, name = "prec_deg", bias_constraint = NonNeg())
self.act = Sequential([inp, act_layer])
self.deg = Sequential([inp, deg_layer])
inp = tf.keras.layers.Dense(n_hidden_precisions, activation = hidden_activation, use_bias=True, name = "prec_hidden", input_shape=(inputs,))
act_layer = tf.keras.layers.Dense(4, activation = tf.nn.sigmoid, name = "prec_act", bias_constraint = tf.keras.constraints.NonNeg())
deg_layer = tf.keras.layers.Dense(4, activation = tf.nn.sigmoid, name = "prec_deg", bias_constraint = tf.keras.constraints.NonNeg())
self.act = tf.keras.Sequential([inp, act_layer])
self.deg = tf.keras.Sequential([inp, deg_layer])

for layer in [inp, act_layer, deg_layer]:
weights, bias = layer.weights
27 changes: 13 additions & 14 deletions models/debug.py
@@ -1,10 +1,9 @@
import tensorflow as tf
from tensorflow.compat.v1 import verify_tensor_all_finite
import tensorflow.compat.v1 as tf # type: ignore
from models.base_model import BaseModel

class Debug_Constant(BaseModel):
def init_with_params(self, params, procdata):
super(Debug_Constant, self).init_with_params(params, procdata)
def __init__(self, params, procdata):
super(Debug_Constant, self).__init__(params, procdata)
self.species = ['OD', 'RFP', 'YFP', 'CFP']
self.n_species = len(self.species)

@@ -30,26 +29,26 @@ def gen_reaction_equations(self, theta, treatments, dev_1hot, condition_on_devic
r = tf.clip_by_value(theta.r, 0.1, 2.0)

def reaction_equations(state, t):
state = verify_tensor_all_finite(state, "state NOT finite")
state = tf.verify_tensor_all_finite(state, "state NOT finite")
x, rfp, yfp, cfp = tf.unstack(state, axis=2)
x = verify_tensor_all_finite(x, "x NOT finite")
rfp = verify_tensor_all_finite(rfp, "rfp NOT finite")
yfp = verify_tensor_all_finite(yfp, "yfp NOT finite")
cfp = verify_tensor_all_finite(cfp, "cfp NOT finite")
x = tf.verify_tensor_all_finite(x, "x NOT finite")
rfp = tf.verify_tensor_all_finite(rfp, "rfp NOT finite")
yfp = tf.verify_tensor_all_finite(yfp, "yfp NOT finite")
cfp = tf.verify_tensor_all_finite(cfp, "cfp NOT finite")

gamma = r * (1.0 - x)
gamma = verify_tensor_all_finite(gamma, "gamma NOT finite")
gamma = tf.verify_tensor_all_finite(gamma, "gamma NOT finite")
# Right-hand sides
d_x = x * gamma
#d_x = verify_tensor_all_finite(d_x, "d_x NOT finite")
d_rfp = 1.0 - (gamma + 1.0) * rfp
d_rfp = verify_tensor_all_finite(d_rfp, "d_rfp NOT finite")
d_rfp = tf.verify_tensor_all_finite(d_rfp, "d_rfp NOT finite")
d_yfp = 1.0 - (gamma + 1.0) * yfp
d_yfp = verify_tensor_all_finite(d_yfp, "d_yfp NOT finite")
d_yfp = tf.verify_tensor_all_finite(d_yfp, "d_yfp NOT finite")
d_cfp = 1.0 - (gamma + 1.0) * cfp
d_cfp = verify_tensor_all_finite(d_cfp, "d_cfp NOT finite")
d_cfp = tf.verify_tensor_all_finite(d_cfp, "d_cfp NOT finite")

X = tf.stack([d_x, d_rfp, d_yfp, d_cfp], axis=2)
X = verify_tensor_all_finite(X, "RHS NOT finite")
X = tf.verify_tensor_all_finite(X, "RHS NOT finite")
return X
return reaction_equations
21 changes: 10 additions & 11 deletions models/dr_blackbox.py
@@ -3,15 +3,14 @@

from models.base_model import BaseModel, NeuralPrecisions
from src.utils import default_get_value, variable_summaries
import tensorflow as tf
from tensorflow.compat.v1 import keras
import tensorflow.compat.v1 as tf # type: ignore
from tensorflow import keras
import numpy as np
import pdb

class DR_Blackbox( BaseModel ):

def init_with_params( self, params, procdata ):
super(DR_Blackbox, self).init_with_params( params, procdata )
def __init__( self, params, procdata ):
super(DR_Blackbox, self).__init__( params, procdata )
self.species = ['OD', 'RFP', 'YFP', 'CFP']
self.nspecies = 4
# do the other inits now
@@ -69,8 +68,8 @@ def observe( self, x_sample, theta ):

class DR_BlackboxStudentT( DR_Blackbox ):

def init_with_params( self, params, procdata ):
super(DR_BlackboxStudentT, self).init_with_params( params, procdata )
def __init__( self, params, procdata ):
super(DR_BlackboxStudentT, self).__init__( params, procdata )

# use a fixed gamma prior over precisions
self.alpha = params['precision_alpha']
@@ -103,8 +102,8 @@ def log_prob_observations( self, x_predict, x_obs, theta, x_sample ):
return log_prob

class DR_BlackboxPrecisions( DR_Blackbox ):
def init_with_params( self, params, procdata ):
super(DR_BlackboxPrecisions, self).init_with_params( params, procdata )
def __init__( self, params, procdata ):
super(DR_BlackboxPrecisions, self).__init__( params, procdata )
self.init_prec = params['init_prec']
self.n_hidden_precisions = params['n_hidden_decoder_precisions']
self.n_states = 4 + self.n_latent_species + 4
@@ -178,8 +177,8 @@ def reaction_equations( state, t ):

class DR_HierarchicalBlackbox( DR_BlackboxPrecisions ):

def init_with_params( self, params, procdata ):
super(DR_HierarchicalBlackbox, self).init_with_params( params, procdata )
def __init__( self, params, procdata ):
super(DR_HierarchicalBlackbox, self).__init__( params, procdata )
# do the other inits now
self.n_x = params['n_x']
self.n_y = params['n_y']
33 changes: 16 additions & 17 deletions models/dr_constant.py
@@ -1,17 +1,16 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under a Microsoft Research License.

from models.base_model import BaseModel, log_prob_gaussian, NeuralPrecisions
from src.utils import default_get_value, variable_summaries
import tensorflow as tf
from tensorflow.compat.v1 import keras, verify_tensor_all_finite
import numpy as np
import pdb
import tensorflow.compat.v1 as tf # type: ignore

from models.base_model import BaseModel, NeuralPrecisions
from src.utils import default_get_value, variable_summaries

class DR_Constant(BaseModel):

def init_with_params(self, params, procdata):
super(DR_Constant, self).init_with_params(params, procdata)
def __init__(self, params, procdata):
super(DR_Constant, self).__init__(params, procdata)
# do the other inits now
self.use_aRFP = default_get_value(params, "use_aRFP", False)
self.species = ['OD', 'RFP', 'YFP', 'CFP', 'F530', 'F480', 'LuxR', 'LasR']
@@ -68,7 +67,7 @@ def gen_reaction_equations(self, theta, treatments, dev_1hot, condition_on_devic
# condition on device information by mapping param_cond = f(param, d; \phi) where d is one-hot rep of device
# currently, f is a one-layer MLP with NO activation function (e.g., offset and scale only)
if condition_on_device:
kinit = keras.initializers.RandomNormal(mean=2.0, stddev=1.5)
kinit = tf.keras.initializers.RandomNormal(mean=2.0, stddev=1.5)
ones = tf.tile([[1.0]],tf.shape(theta.r))
aR = self.device_conditioner(ones, 'aR', dev_1hot, kernel_initializer=kinit)
aS = self.device_conditioner(ones, 'aS', dev_1hot, kernel_initializer=kinit)
@@ -99,8 +98,8 @@ def reaction_equations(state, t):
P81 = (e81 + KGR_81 * boundLuxR + KGS_81 * boundLasR) / (1.0 + KGR_81 * boundLuxR + KGS_81 * boundLasR)

# Check they are finite
boundLuxR = verify_tensor_all_finite(boundLuxR, "boundLuxR NOT finite")
boundLasR = verify_tensor_all_finite(boundLasR, "boundLasR NOT finite")
boundLuxR = tf.verify_tensor_all_finite(boundLuxR, "boundLuxR NOT finite")
boundLasR = tf.verify_tensor_all_finite(boundLasR, "boundLasR NOT finite")

# Right-hand sides
d_x = gamma * x
@@ -122,8 +121,8 @@

class DR_ConstantStudentT(DR_Constant):

def init_with_params(self, params):
super(DR_ConstantStudentT, self).init_with_params(params)
def __init__(self, params, procdata):
super(DR_ConstantStudentT, self).__init__(params, procdata)

# use a fixed gamma prior over precisions
self.alpha = params['precision_alpha']
@@ -156,8 +155,8 @@ def log_prob_observations(self, x_predict, x_obs, theta, x_sample):

class DR_Constant_Precisions(DR_Constant):

def init_with_params(self, params, procdata):
super(DR_Constant_Precisions, self).init_with_params(params, procdata)
def __init__(self, params, procdata):
super(DR_Constant_Precisions, self).__init__(params, procdata)

self.species = ['OD', 'RFP', 'YFP', 'CFP', 'F510', 'F430', 'LuxR', 'LasR']
self.init_prec = default_get_value(params, 'init_prec', 0.00001)
@@ -223,7 +222,7 @@ def gen_reaction_equations(self, theta, treatments, dev_1hot, condition_on_devic
# condition on device information by mapping param_cond = f(param, d; \phi) where d is one-hot rep of device
# currently, f is a one-layer MLP with NO activation function (e.g., offset and scale only)
if condition_on_device:
kinit = keras.initializers.RandomNormal(mean=2.0, stddev=1.5)
kinit = tf.keras.initializers.RandomNormal(mean=2.0, stddev=1.5)
ones = tf.tile([[1.0]], tf.shape(theta.r))
aR = self.device_conditioner(ones, 'aR', dev_1hot, kernel_initializer=kinit)
aS = self.device_conditioner(ones, 'aS', dev_1hot, kernel_initializer=kinit)
@@ -255,8 +254,8 @@ def reaction_equations(state, t):
P81 = (e81 + KGR_81 * boundLuxR + KGS_81 * boundLasR) / (1.0 + KGR_81 * boundLuxR + KGS_81 * boundLasR)

# Check they are finite
boundLuxR = verify_tensor_all_finite(boundLuxR, "boundLuxR NOT finite")
boundLasR = verify_tensor_all_finite(boundLasR, "boundLasR NOT finite")
boundLuxR = tf.verify_tensor_all_finite(boundLuxR, "boundLuxR NOT finite")
boundLasR = tf.verify_tensor_all_finite(boundLasR, "boundLasR NOT finite")

# Right-hand sides
d_x = gamma * x
15 changes: 7 additions & 8 deletions models/dr_growthrate.py
@@ -1,10 +1,9 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under a Microsoft Research License.

from src.utils import default_get_value, variable_summaries
from src.utils import variable_summaries
from models.dr_constant import DR_Constant
import tensorflow as tf
from tensorflow.compat.v1 import keras, verify_tensor_all_finite
import tensorflow.compat.v1 as tf # type: ignore
import numpy as np

class DR_Growth( DR_Constant ):
@@ -50,7 +49,7 @@ def gen_reaction_equations( self, theta, treatments, dev_1hot, condition_on_devi
# condition on device information by mapping param_cond = f(param, d; \phi) where d is one-hot rep of device
# currently, f is a one-layer MLP with NO activation function (e.g., offset and scale only)
if condition_on_device:
kinit = keras.initializers.RandomNormal(mean=2.0, stddev=1.5)
kinit = tf.keras.initializers.RandomNormal(mean=2.0, stddev=1.5)
ones = tf.tile([[1.0]], tf.shape(theta.r))
aR = self.device_conditioner(ones, 'aR', dev_1hot, kernel_initializer=kinit)
aS = self.device_conditioner(ones, 'aS', dev_1hot, kernel_initializer=kinit)
@@ -83,8 +82,8 @@ def reaction_equations( state, t ):
#P81 = func(luxR, lasR, c6, c12)

# Check they are finite
boundLuxR = verify_tensor_all_finite(boundLuxR, "boundLuxR NOT finite")
boundLasR = verify_tensor_all_finite(boundLasR, "boundLasR NOT finite")
boundLuxR = tf.verify_tensor_all_finite(boundLuxR, "boundLuxR NOT finite")
boundLasR = tf.verify_tensor_all_finite(boundLasR, "boundLasR NOT finite")

# Right-hand sides
d_x = gamma*x
@@ -102,8 +101,8 @@

class DR_GrowthStudentT( DR_Growth ):

def init_with_params( self, params ):
super(DR_GrowthStudentT, self).init_with_params( params )
def __init__( self, params, procdata ):
super(DR_GrowthStudentT, self).__init__( params, procdata )

# use a fixed gamma prior over precisions
self.alpha = params['precision_alpha']
3 changes: 2 additions & 1 deletion requirements.txt
@@ -4,4 +4,5 @@ numpy==1.18.5
matplotlib
pandas
seaborn
pyyaml
pyyaml
pytest
2 changes: 1 addition & 1 deletion specs/debug.yaml
@@ -13,7 +13,7 @@ data:
conditions: ["C6","C12"]

params:
model: !!python/object:models.debug.Debug_Constant {}
model: debug

constant:
init_x: 0.002
2 changes: 1 addition & 1 deletion specs/debug_precisions.yaml
@@ -16,7 +16,7 @@ data:
separate_conditions: true

params:
model: !!python/object:models.dr_constant.DR_Constant_Precisions {}
model: dr_constant_precisions
learning_boundaries: [250,1000]
learning_rates: [0.01,0.002,0.0002]
n_hidden_decoder_precisions: 5
2 changes: 1 addition & 1 deletion specs/dr_blackbox_xval.yaml
@@ -13,7 +13,7 @@ data:
separate_conditions: true

params:
model: !!python/object:models.dr_blackbox.DR_BlackboxPrecisions {}
model: dr_blackbox_precisions
theta_columns: ['z1','z2','z3','z4','z5'] #,'z6','z7','z8','z9','z10']
n_z: 5
n_latent_species: 2
2 changes: 1 addition & 1 deletion specs/dr_blackbox_xval_hierarchical.yaml
@@ -13,7 +13,7 @@ data:
separate_conditions: true

params:
model: !!python/object:models.dr_blackbox.DR_HierarchicalBlackbox {}
model: dr_hierarchical_blackbox
#theta_columns: ['x1','y1','z1','z2','z3'] #,'z6','z7','z8','z9','z10']
theta_columns: ['x1','x2','y1','y2','z1','z2'] #,'z6','z7','z8','z9','z10']

2 changes: 1 addition & 1 deletion specs/dr_constant_icml.yaml
@@ -16,7 +16,7 @@ data:
separate_conditions: True

params:
model: !!python/object:models.dr_constant.DR_Constant {}
model: dr_constant
learning_boundaries: [250,1000]
learning_rates: [0.01,0.002,0.0002]

2 changes: 1 addition & 1 deletion specs/dr_constant_one.yaml
@@ -10,7 +10,7 @@ data:
separate_conditions: True

params:
model: !!python/object:models.dr_constant.DR_Constant {}
model: dr_constant
learning_boundaries: [250,1000]
learning_rates: [0.002,0.0004,0.00008]
#solver: modeulerwhile
2 changes: 1 addition & 1 deletion specs/dr_constant_precisions.yaml
@@ -16,7 +16,7 @@ data:
separate_conditions: true

params:
model: !!python/object:models.dr_constant.DR_Constant_Precisions {}
model: dr_constant_precisions
theta_columns: ['aYFP','aCFP','aR','aS','r','K']
lambda_l2: 0.001
lambda_l2_hidden: 0.001
