Skip to content

Commit

Permalink
#191 more fixes for dict reordering
Browse files Browse the repository at this point in the history
  • Loading branch information
martinjrobins committed May 9, 2019
1 parent 3713ff3 commit 99a2e1a
Show file tree
Hide file tree
Showing 5 changed files with 80 additions and 37 deletions.
38 changes: 22 additions & 16 deletions pybamm/discretisations/discretisation.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,8 +169,8 @@ def process_initial_conditions(self, model):

# Concatenate initial conditions into a single vector
# check that all initial conditions are set
processed_concatenated_initial_conditions = self._concatenate_init(
processed_initial_conditions
processed_concatenated_initial_conditions = self._concatenate_in_order(
processed_initial_conditions, check_complete=True
).evaluate(0, None)

return processed_initial_conditions, processed_concatenated_initial_conditions
Expand All @@ -195,12 +195,17 @@ def process_rhs_and_algebraic(self, model):
# Discretise right-hand sides, passing domain from variable
processed_rhs = self.process_dict(model.rhs)
# Concatenate rhs into a single state vector
processed_concatenated_rhs = self.concatenate(*processed_rhs.values())
# Need to concatenate in order as the ordering of equations could be different
# in processed_rhs and model.rhs (for Python Version <= 3.5)
processed_concatenated_rhs = self._concatenate_in_order(processed_rhs)

# Discretise and concatenate algebraic equations
processed_algebraic = self.process_dict(model.algebraic)
processed_concatenated_algebraic = self.concatenate(
*processed_algebraic.values()

# Need to concatenate in order as the ordering of equations could be different
# in processed_algebraic and model.algebraic (for Python Version <= 3.5)
processed_concatenated_algebraic = self._concatenate_in_order(
processed_algebraic
)

return (
Expand Down Expand Up @@ -404,10 +409,9 @@ def process_symbol(self, symbol):
def concatenate(self, *symbols):
return pybamm.NumpyConcatenation(*symbols)

def _concatenate_init(self, var_eqn_dict):
def _concatenate_in_order(self, var_eqn_dict, check_complete=False):
"""
Concatenate a dictionary of {variable: equation} initial conditions using
self._y_slices
Concatenate a dictionary of {variable: equation} using self._y_slices
The keys/variables in `var_eqn_dict` must all have entries in
`self._y_slices`; if `check_complete` is True they must match its ids exactly.
Expand All @@ -432,14 +436,16 @@ def _concatenate_init(self, var_eqn_dict):
unpacked_variables.extend([var for var in symbol.children])
else:
unpacked_variables.append(symbol)
# Check keys from the given var_eqn_dict against self._y_slices
ids = {v.id for v in unpacked_variables}
if ids != self._y_slices.keys():
given_variable_names = [v.name for v in var_eqn_dict.keys()]
raise pybamm.ModelError(
"Initial conditions are insufficient. Only "
"provided for {} ".format(given_variable_names)
)

if check_complete:
# Check keys from the given var_eqn_dict against self._y_slices
ids = {v.id for v in unpacked_variables}
if ids != self._y_slices.keys():
given_variable_names = [v.name for v in var_eqn_dict.keys()]
raise pybamm.ModelError(
"Initial conditions are insufficient. Only "
"provided for {} ".format(given_variable_names)
)

equations = list(var_eqn_dict.values())
slices = [self._y_slices[var.id] for var in unpacked_variables]
Expand Down
6 changes: 3 additions & 3 deletions pybamm/parameters/parameter_values.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,9 +145,9 @@ def update_model(self, model, disc):
self.process_model(model, processing="update")

# update discretised quantities using disc
model.concatenated_rhs = disc.concatenate(*model.rhs.values())
model.concatenated_algebraic = disc.concatenate(*model.algebraic.values())
model.concatenated_initial_conditions = disc._concatenate_init(
model.concatenated_rhs = disc._concatenate_in_order(model.rhs)
model.concatenated_algebraic = disc._concatenate_in_order(model.algebraic)
model.concatenated_initial_conditions = disc._concatenate_in_order(
model.initial_conditions
).evaluate(0, None)

Expand Down
18 changes: 10 additions & 8 deletions tests/unit/test_discretisations/test_discretisation.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@


class TestDiscretise(unittest.TestCase):
def test_concatenate_init(self):
def test_concatenate_in_order(self):
a = pybamm.Variable("a")
b = pybamm.Variable("b")
c = pybamm.Variable("c")
Expand All @@ -25,16 +25,16 @@ def test_concatenate_init(self):
disc = get_discretisation_for_testing()

disc._y_slices = {c.id: slice(0, 1), a.id: slice(2, 3), b.id: slice(3, 4)}
# Concatenating a complete set of initial conditions succeeds, and the
# children come back ordered by y-slice (c, a, b), not dict order.
# NOTE: `check_complete=True` goes on the _concatenate_in_order call —
# passing it to assertIsInstance is a TypeError (not a valid kwarg there).
result = disc._concatenate_in_order(initial_conditions, check_complete=True)

self.assertIsInstance(result, pybamm.NumpyConcatenation)
self.assertEqual(result.children[0].evaluate(), 1)
self.assertEqual(result.children[1].evaluate(), 2)
self.assertEqual(result.children[2].evaluate(), 3)

# An incomplete set of initial conditions must raise when the
# completeness check is requested
initial_conditions = {a: pybamm.Scalar(2), b: pybamm.Scalar(3)}
with self.assertRaises(pybamm.ModelError):
    disc._concatenate_in_order(initial_conditions, check_complete=True)

def test_discretise_slicing(self):
# create discretisation
Expand Down Expand Up @@ -403,15 +403,17 @@ def test_process_model_ode(self):
else:
vect = init.evaluate() * np.ones_like(mesh[var.domain[0]][0].nodes)

y0_expect = np.concatenate([y0_expect,vect])
y0_expect = np.concatenate([y0_expect, vect])

np.testing.assert_array_equal(y0,y0_expect)
np.testing.assert_array_equal(y0, y0_expect)

# grad and div are identity operators here
np.testing.assert_array_equal(y0, model.concatenated_rhs.evaluate(None, y0))

S0 = model.initial_conditions[S].evaluate() * np.ones_like(mesh[S.domain[0]][0].nodes)
T0 = model.initial_conditions[T].evaluate() * np.ones_like(mesh[T.domain[0]][0].nodes)
S0 = model.initial_conditions[S].evaluate(
) * np.ones_like(mesh[S.domain[0]][0].nodes)
T0 = model.initial_conditions[T].evaluate(
) * np.ones_like(mesh[T.domain[0]][0].nodes)
np.testing.assert_array_equal(S0 * T0, model.variables["ST"].evaluate(None, y0))

# mass matrix is identity
Expand Down
28 changes: 23 additions & 5 deletions tests/unit/test_solvers/test_scikits_solvers.py
Original file line number Diff line number Diff line change
Expand Up @@ -377,6 +377,8 @@ def test_model_solver_ode_jacobian(self):
var2 = pybamm.Variable("var2", domain=whole_cell)
model.rhs = {var1: var1, var2: 1 - var1}
model.initial_conditions = {var1: 1.0, var2: -1.0}
model.variables = {"var1": var1, "var2": var2}

disc = get_discretisation_for_testing()
disc.process_model(model)

Expand All @@ -387,10 +389,18 @@ def test_model_solver_ode_jacobian(self):
)
N = combined_submesh[0].npts

# construct jacobian in order of model.rhs
J = []
for var in model.rhs.keys():
if var.id == var1.id:
J.append([np.eye(N), np.zeros((N, N))])
else:
J.append([-1.0 * np.eye(N), np.zeros((N, N))])

J = np.block(J)

def jacobian(t, y):
return np.block(
[[np.eye(N), np.zeros((N, N))], [-1.0 * np.eye(N), np.zeros((N, N))]]
)
return J

model.jacobian = jacobian

Expand All @@ -399,8 +409,16 @@ def jacobian(t, y):
t_eval = np.linspace(0, 1, 100)
solver.solve(model, t_eval)
np.testing.assert_array_equal(solver.t, t_eval)
np.testing.assert_allclose(solver.y[0], np.exp(solver.t))
np.testing.assert_allclose(solver.y[-1], solver.t - np.exp(solver.t))

T, Y = solver.t, solver.y
np.testing.assert_array_almost_equal(
model.variables["var1"].evaluate(T, Y),
np.ones((N, T.size)) * np.exp(T[np.newaxis, :])
)
np.testing.assert_array_almost_equal(
model.variables["var2"].evaluate(T, Y),
np.ones((N, T.size)) * (T[np.newaxis, :] - np.exp(T[np.newaxis, :]))
)

def test_model_solver_dae(self):
# Create model
Expand Down
27 changes: 22 additions & 5 deletions tests/unit/test_solvers/test_scipy_solver.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,7 @@ def test_model_solver_ode_with_jacobian(self):
var2 = pybamm.Variable("var2", domain=whole_cell)
model.rhs = {var1: var1, var2: 1 - var1}
model.initial_conditions = {var1: 1.0, var2: -1.0}
model.variables = {"var1": var1, "var2": var2}

# create discretisation
mesh = get_mesh_for_testing()
Expand All @@ -175,10 +176,18 @@ def test_model_solver_ode_with_jacobian(self):
)
N = combined_submesh[0].npts

# construct jacobian in order of model.rhs
J = []
for var in model.rhs.keys():
if var.id == var1.id:
J.append([np.eye(N), np.zeros((N, N))])
else:
J.append([-1.0 * np.eye(N), np.zeros((N, N))])

J = np.block(J)

def jacobian(t, y):
return np.block(
[[np.eye(N), np.zeros((N, N))], [-1.0 * np.eye(N), np.zeros((N, N))]]
)
return J

model.jacobian = jacobian

Expand All @@ -187,8 +196,16 @@ def jacobian(t, y):
t_eval = np.linspace(0, 1, 100)
solver.solve(model, t_eval)
np.testing.assert_array_equal(solver.t, t_eval)
np.testing.assert_allclose(solver.y[0], np.exp(solver.t))
np.testing.assert_allclose(solver.y[-1], solver.t - np.exp(solver.t))

T, Y = solver.t, solver.y
np.testing.assert_array_almost_equal(
model.variables["var1"].evaluate(T, Y),
np.ones((N, T.size)) * np.exp(T[np.newaxis, :])
)
np.testing.assert_array_almost_equal(
model.variables["var2"].evaluate(T, Y),
np.ones((N, T.size)) * (T[np.newaxis, :] - np.exp(T[np.newaxis, :]))
)


if __name__ == "__main__":
Expand Down

0 comments on commit 99a2e1a

Please sign in to comment.