From 7d4dd9873624875e52dde64a372340d810bc0b9b Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:42:43 +0000 Subject: [PATCH 01/16] ENH: Add function to calculate number of polynomial features This is much faster than fitting the features, and can be used by trapping to replace some constraint index calculations. Also: Add docstring/type annotations Rename a variable so it doesn't shadow newly-imported name --- pysindy/feature_library/polynomial_library.py | 66 ++++++++++++++++--- test/test_feature_library.py | 4 ++ 2 files changed, 62 insertions(+), 8 deletions(-) diff --git a/pysindy/feature_library/polynomial_library.py b/pysindy/feature_library/polynomial_library.py index 75dbf563..30710041 100644 --- a/pysindy/feature_library/polynomial_library.py +++ b/pysindy/feature_library/polynomial_library.py @@ -1,7 +1,9 @@ from itertools import chain +from math import comb from typing import Iterator import numpy as np +from numpy.typing import NDArray from scipy import sparse from sklearn.preprocessing import PolynomialFeatures from sklearn.utils.validation import check_is_fitted @@ -73,8 +75,22 @@ def __init__( @staticmethod def _combinations( - n_features, degree, include_interaction, interaction_only, include_bias - ) -> Iterator[tuple]: + n_features: int, + degree: int, + include_interaction: bool, + interaction_only: bool, + include_bias: bool, + ) -> Iterator[tuple[int, ...]]: + """ + Create selection tuples of input indexes for each polynomial term + + Selection tuple iterates the input indexes present in a single term. + For example, (x+y+1)^2 would be an iterator of the tuples: + (), (0,), (1,), (0, 0), (0, 1), (1, 1) + 1 x y x^2 x*y y^2 + + Order of terms is preserved regardless of include_interaction. + """ if not include_interaction: return chain( [()] if include_bias else [], @@ -93,7 +109,12 @@ def _combinations( ) @property - def powers_(self): + def powers_(self) -> NDArray[np.dtype("int")]: + """ + The exponents of the polynomial as an array of shape + (n_features_out, n_features_in), where each item is the exponent of the + jth input variable in the ith polynomial term. + """ check_is_fitted(self) combinations = self._combinations( n_features=self.n_features_in_, @@ -208,10 +229,10 @@ def transform(self, x_full): ) if sparse.isspmatrix(x): columns = [] - for comb in combinations: - if comb: + for combo in combinations: + if combo: out_col = 1 - for col_idx in comb: + for col_idx in combo: out_col = x[..., col_idx].multiply(out_col) columns.append(out_col) else: @@ -227,7 +248,36 @@ def transform(self, x_full): ), x.__dict__, ) - for i, comb in enumerate(combinations): - xp[..., i] = x[..., comb].prod(-1) + for i, combo in enumerate(combinations): + xp[..., i] = x[..., combo].prod(-1) xp_full = xp_full + [xp] return xp_full + + +def n_poly_features( + n_in_feat: int, + degree: int, + include_bias: bool = False, + include_interaction: bool = True, + interaction_only: bool = False, +) -> int: + """Calculate number of polynomial features + + Args: + n_in_feat: number of input features, e.g. 3 for x, y, z + degree: polynomial degree, e.g. 2 for quadratic + include_bias: whether to include a constant term + include_interaction: whether to include terms mixing multiple inputs + interaction_only: whether to omit terms of x_m * x_n^p for p > 1 + """ + if not include_interaction and interaction_only: + raise ValueError("Cannot set interaction only if include_interaction is False") + n_feat = include_bias + if not include_interaction: + return n_feat + n_in_feat * degree + for deg in range(1, degree + 1): + if interaction_only: + n_feat += comb(n_in_feat, deg) + else: + n_feat += comb(n_in_feat + deg - 1, deg) + return n_feat
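As a quick sanity check of the counting logic above, a sketch (calls assume the signature added in this patch; the numbers follow from the stars-and-bars formula):

# 3 inputs, degree 2, no bias: 3 linear terms + comb(3 + 2 - 1, 2) = 6 quadratic terms
assert n_poly_features(3, 2) == 9
# interaction_only drops pure powers like x^2: 3 linear + comb(3, 2) = 3 mixed terms
assert n_poly_features(3, 2, interaction_only=True) == 6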
diff --git a/test/test_feature_library.py b/test/test_feature_library.py index 8e98b1a0..8bf27c58 100644 --- a/test/test_feature_library.py +++ b/test/test_feature_library.py @@ -21,6 +21,7 @@ from pysindy.feature_library import TensoredLibrary from pysindy.feature_library import WeakPDELibrary from pysindy.feature_library.base import BaseFeatureLibrary +from pysindy.feature_library.polynomial_library import n_poly_features from pysindy.optimizers import SINDyPI from pysindy.optimizers import STLSQ @@ -880,3 +881,6 @@ def test_polynomial_combinations(include_interaction, interaction_only, bias, ex ) result = tuple(sorted(list(combos))) assert result == expected + assert len(expected) == n_poly_features( + 2, 2, bias, include_interaction, interaction_only + ) From 65304b2b10189c536ca06754b7c9d9f7ebcd48e1 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Fri, 8 Dec 2023 16:47:07 +0000 Subject: [PATCH 02/16] ENH: add _make_constraints to TrappingSR3 There are several major differences from the version in example 8: * Coupled with PolynomialLibrary logic using PolynomialLibrary.powers_. Previously, this coupling was implicit and actually tied to the specific order in Example 8, not the order users would expect. * Instead of handling a pre-flattened 2D array, constraints are built with target and feature axes separately, letting the caller handle formatting the constraints as "target" or "feature". This gives us flexibility to later clean up the constraint logic outside this function without needing to return here and removes the need for striding calculations like: n_tgts * (n_tgts + k - 1) + i + n_tgts * int(j * (2 * n_tgts - j - 3) / 2.0) Since constraints are built directly from the terms and exponents in the library, it's much easier to read and debug the code. * Renamed "r" to "n_tgts", etc. Single-letter index variables are removed. When they occur in homogeneous iterators, they are concatenated with what they're iterating (e.g. tgt_i, tgt_j) * Broke out the different kinds of constraints into helper functions to make it easier to test. This allows follow-on work to auto-attach constraints to a problem.
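For illustration, a sketch of what the new function enforces for a hypothetical two-target quadratic library ordered [x, y, x^2, xy, y^2] (coefficient values made up to satisfy equation 5):

# C has shape (n_constraints, n_features, n_tgts) == (4, 5, 2)
zeros_rhs, C = _make_constraints(2)
coefs = np.array([[0, 0, 0, 1, -1],   # x' = xy - y^2
                  [0, 0, -1, 1, 0]])  # y' = -x^2 + xy
# contracting the feature and target axes against the coefficients
# recovers the zero right-hand side, i.e. the constraints hold
assert np.all(np.tensordot(C, coefs, ((1, 2), (1, 0))) == zeros_rhs)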
--- pysindy/optimizers/trapping_sr3.py | 94 ++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/pysindy/optimizers/trapping_sr3.py b/pysindy/optimizers/trapping_sr3.py index dad0aa8c..def9d71e 100644 --- a/pysindy/optimizers/trapping_sr3.py +++ b/pysindy/optimizers/trapping_sr3.py @@ -1,12 +1,19 @@ import warnings +from itertools import combinations as combo_nr +from itertools import combinations_with_replacement as combo_wr +from math import comb from typing import Tuple import cvxpy as cp import numpy as np +from numpy import intp +from numpy.typing import NDArray from scipy.linalg import cho_factor from scipy.linalg import cho_solve from sklearn.exceptions import ConvergenceWarning +from ..feature_library.polynomial_library import n_poly_features +from ..feature_library.polynomial_library import PolynomialLibrary from ..utils import reorder_constraints from .sr3 import SR3 @@ -813,3 +820,90 @@ def _reduce(self, x, y): self.coef_ = coef_sparse.T self.objective_history = objective_history + + +def _make_constraints(n_tgts: int): + """Create constraints for the Quadratic terms in TrappingSR3. + + These are the constraints from equation 5 of the Trapping SINDy paper. + + Args: + n_tgts: number of coordinates or modes for which you're fitting an ODE. + + Returns: + A tuple of the constraint zeros, and a constraint matrix to multiply + by the coefficient matrix of Polynomial terms. Number of constraints is + ``n_tgts + 2 * math.comb(n_tgts, 2) + math.comb(n_tgts, 3)``. + Constraint matrix is of shape ``(n_constraint, n_feature, n_tgt)``. + To get "feature" order constraints, use + ``np.reshape(constraint_matrix, (n_constraints, -1))``. + To get "target" order constraints, transpose axis 1 and 2 before + reshaping. + """ + n_terms = n_poly_features(n_tgts, degree=2, include_bias=False) + lib = PolynomialLibrary(2, include_bias=False).fit(np.zeros((1, n_tgts))) + terms = [(t_ind, exps) for t_ind, exps in enumerate(lib.powers_)] + + # index of tgt -> index of its pure quadratic term + pure_terms = {np.argmax(exps): t_ind for t_ind, exps in terms if max(exps) == 2} + # two indexes of tgts -> index of their mixed quadratic term + mixed_terms = { + frozenset(np.argwhere(exponent == 1).flatten()): t_ind + for t_ind, exponent in terms + if max(exponent) == 1 and sum(exponent) == 2 + } + constraint_mat = np.vstack( + ( + _pure_constraints(n_tgts, n_terms, pure_terms), + _antisymm_double_constraint(n_tgts, n_terms, pure_terms, mixed_terms), + _antisymm_triple_constraints(n_tgts, n_terms, mixed_terms), + ) + ) + + return np.zeros(len(constraint_mat)), constraint_mat
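To make the two index maps concrete, a sketch (assuming a hypothetical n_tgts = 3, where powers_ orders the features as [x, y, z, x^2, xy, xz, y^2, yz, z^2]):

# pure_terms maps each target to the feature index of its pure quadratic:
#     {0: 3, 1: 6, 2: 8}            i.e. x -> x^2, y -> y^2, z -> z^2
# mixed_terms maps each pair of targets to their cross term:
#     {frozenset({0, 1}): 4,        xy
#      frozenset({0, 2}): 5,        xz
#      frozenset({1, 2}): 7}        yz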
+ + +def _pure_constraints( + n_tgts: int, n_terms: int, pure_terms: dict[intp, int] +) -> NDArray[np.dtype("float")]: + """Set constraints for coefficients adorning terms like a_i^3 = 0""" + constraint_mat = np.zeros((n_tgts, n_terms, n_tgts)) + for constr_ind, (tgt_ind, term_ind) in zip(range(n_tgts), pure_terms.items()): + constraint_mat[constr_ind, term_ind, tgt_ind] = 1.0 + return constraint_mat + + +def _antisymm_double_constraint( + n_tgts: int, + n_terms: int, + pure_terms: dict[intp, int], + mixed_terms: dict[frozenset[intp], int], +) -> NDArray[np.dtype("float")]: + """Set constraints for coefficients adorning terms like a_i^2 * a_j=0""" + constraint_mat_1 = np.zeros((comb(n_tgts, 2), n_terms, n_tgts)) # a_i^2 * a_j + constraint_mat_2 = np.zeros((comb(n_tgts, 2), n_terms, n_tgts)) # a_i * a_j^2 + for constr_ind, ((tgt_i, tgt_j), mix_term) in zip( + range(n_tgts), mixed_terms.items() + ): + constraint_mat_1[constr_ind, mix_term, tgt_i] = 1.0 + constraint_mat_1[constr_ind, pure_terms[tgt_i], tgt_j] = 1.0 + constraint_mat_2[constr_ind, mix_term, tgt_j] = 1.0 + constraint_mat_2[constr_ind, pure_terms[tgt_j], tgt_i] = 1.0 + + return np.concatenate((constraint_mat_1, constraint_mat_2), axis=0) + + +def _antisymm_triple_constraints( + n_tgts: int, n_terms: int, mixed_terms: dict[frozenset[intp], int] +) -> NDArray[np.dtype("float")]: + constraint_mat = np.zeros((comb(n_tgts, 3), n_terms, n_tgts)) # a_ia_ja_k + + def find_symm_term(a, b): + return mixed_terms[frozenset({a, b})] + + for constr_ind, (tgt_i, tgt_j, tgt_k) in enumerate(combo_nr(range(n_tgts), 3)): + constraint_mat[constr_ind, find_symm_term(tgt_j, tgt_k), tgt_i] = 1 + constraint_mat[constr_ind, find_symm_term(tgt_k, tgt_i), tgt_j] = 1 + constraint_mat[constr_ind, find_symm_term(tgt_i, tgt_j), tgt_k] = 1 + + return constraint_mat From 018eb9a80defc1518f57b73d54789eed17d5199b Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sun, 10 Dec 2023 17:53:58 +0000 Subject: [PATCH 03/16] TST: Add Trapping _make_constraints test --- test/test_optimizers.py | 17 +++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/test_optimizers.py b/test/test_optimizers.py index c69ce982..385d809c 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -30,6 +30,8 @@ from pysindy.optimizers import TrappingSR3 from pysindy.optimizers import WrappedOptimizer from pysindy.optimizers.stlsq import _remove_and_decrement +from pysindy.optimizers.trapping_sr3 import _antisymm_triple_constraints +from pysindy.optimizers.trapping_sr3 import _make_constraints from pysindy.utils import supports_multiple_targets from pysindy.utils.odes import enzyme @@ -1129,3 +1131,18 @@ def test_remove_and_decrement(): existing_vals=existing_vals, vals_to_remove=vals_to_remove ) np.testing.assert_array_equal(expected, result) + + +def test_trapping_constraints(): + # x, y, x^2, xy, y^2 + constraint_rhs, constraint_lhs = _make_constraints(2) + stable_coefs = np.array([[0, 0, 0, 1, -1], [0, 0, -1, 1, 0]]) + result = np.tensordot(constraint_lhs, stable_coefs, ((1, 2), (1, 0))) + np.testing.assert_array_equal(constraint_rhs, result) + + # xy, xz, yz + stable_coefs = np.array([[0, 0, -1], [0, 0.5, 0], [0.5, 0, 0]]) + mixed_terms = {frozenset((0, 1)): 0, frozenset((0, 2)): 1, frozenset((1, 2)): 2} + constraint_lhs = _antisymm_triple_constraints(3, 3, mixed_terms) + result = np.tensordot(constraint_lhs, stable_coefs, ((1, 2), (1, 0))) + assert result[0] == 0 From 0870f57ab40d7d2ca65f3e24fd85d91ee240ff82 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Sun, 10 Dec 2023 18:41:00 +0000 Subject: [PATCH 04/16] ENH: Allow all polynomial library options in trapping (other than degree, which has to be 2)
Args: n_tgts: number of coordinates or modes for which you're fitting an ODE. + kwargs: Keyword arguments to PolynomialLibrary such as + ``include_bias``. Returns: A tuple of the constraint zeros, and a constraint matrix to multiply @@ -841,7 +843,7 @@ def _make_constraints(n_tgts: int): reshaping. """ n_terms = n_poly_features(n_tgts, degree=2, include_bias=False) - lib = PolynomialLibrary(2, include_bias=False).fit(np.zeros((1, n_tgts))) + lib = PolynomialLibrary(2, **kwargs).fit(np.zeros((1, n_tgts))) terms = [(t_ind, exps) for t_ind, exps in enumerate(lib.powers_)] # index of tgt -> index of its pure quadratic term diff --git a/test/test_optimizers.py b/test/test_optimizers.py index 385d809c..ac65049e 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -1135,7 +1135,7 @@ def test_remove_and_decrement(): def test_trapping_constraints(): # x, y, x^2, xy, y^2 - constraint_rhs, constraint_lhs = _make_constraints(2) + constraint_rhs, constraint_lhs = _make_constraints(2, include_bias=False) stable_coefs = np.array([[0, 0, 0, 1, -1], [0, 0, -1, 1, 0]]) result = np.tensordot(constraint_lhs, stable_coefs, ((1, 2), (1, 0))) np.testing.assert_array_equal(constraint_rhs, result) From e00724773c491ae382b8371bfc5b247735addd7b Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Tue, 2 Jan 2024 16:45:04 +0000 Subject: [PATCH 05/16] CLN: remove unused import --- pysindy/optimizers/trapping_sr3.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pysindy/optimizers/trapping_sr3.py b/pysindy/optimizers/trapping_sr3.py index 1d8410bc..e9816481 100644 --- a/pysindy/optimizers/trapping_sr3.py +++ b/pysindy/optimizers/trapping_sr3.py @@ -1,6 +1,5 @@ import warnings from itertools import combinations as combo_nr -from itertools import combinations_with_replacement as combo_wr from math import comb from typing import Tuple From 3730a91a9e5405777a6d7c6bd6a919ab6d00bff9 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Mon, 8 Jan 2024 13:19:21 +0000 Subject: [PATCH 06/16] BUG(_make_constraints): Request correct num of terms if include_bias --- pysindy/optimizers/trapping_sr3.py | 3 ++- test/test_optimizers.py | 9 +++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/pysindy/optimizers/trapping_sr3.py b/pysindy/optimizers/trapping_sr3.py index 993b2dc9..3b250bd7 100644 --- a/pysindy/optimizers/trapping_sr3.py +++ b/pysindy/optimizers/trapping_sr3.py @@ -589,7 +589,8 @@ def _make_constraints(n_tgts: int, **kwargs): To get "target" order constraints, transpose axis 1 and 2 before reshaping. 
""" - n_terms = n_poly_features(n_tgts, degree=2, include_bias=False) + include_bias = kwargs.get("include_bias", False) + n_terms = n_poly_features(n_tgts, degree=2, include_bias=include_bias) lib = PolynomialLibrary(2, **kwargs).fit(np.zeros((1, n_tgts))) terms = [(t_ind, exps) for t_ind, exps in enumerate(lib.powers_)] diff --git a/test/test_optimizers.py b/test/test_optimizers.py index 5cc6bd4c..045657ae 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -1131,13 +1131,18 @@ def test_remove_and_decrement(): np.testing.assert_array_equal(expected, result) -def test_trapping_constraints(): +@pytest.mark.parametrize("include_bias", (True, False)) +def test_trapping_constraints(include_bias): # x, y, x^2, xy, y^2 - constraint_rhs, constraint_lhs = _make_constraints(2, include_bias=False) + constraint_rhs, constraint_lhs = _make_constraints(2, include_bias=include_bias) stable_coefs = np.array([[0, 0, 0, 1, -1], [0, 0, -1, 1, 0]]) + if include_bias: + stable_coefs = np.concatenate(([[0], [0]], stable_coefs), axis=1) result = np.tensordot(constraint_lhs, stable_coefs, ((1, 2), (1, 0))) np.testing.assert_array_equal(constraint_rhs, result) + +def test_trapping_mixed_only(): # xy, xz, yz stable_coefs = np.array([[0, 0, -1], [0, 0.5, 0], [0.5, 0, 0]]) mixed_terms = {frozenset((0, 1)): 0, frozenset((0, 2)): 1, frozenset((1, 2)): 2} From 141f7f613ed249ebef54f5ca7dc9fe95be55ed07 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Mon, 8 Jan 2024 16:42:11 +0000 Subject: [PATCH 07/16] TST(trapping): Remove trapping w/cubic library --- test/test_optimizers.py | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/test/test_optimizers.py b/test/test_optimizers.py index 045657ae..0d7fc5ee 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -504,33 +504,6 @@ def test_trapping_sr3_quadratic_library(params, trapping_sr3_params, quadratic_l assert np.allclose((opt.coef_.flatten())[0], 0.0, atol=1e-5) -def test_trapping_cubic_library(): - x = np.random.standard_normal((10, 3)) - library_functions = [ - lambda x: x, - lambda x, y: x * y, - lambda x: x**2, - lambda x, y, z: x * y * z, - lambda x, y: x**2 * y, - lambda x: x**3, - ] - library_function_names = [ - lambda x: str(x), - lambda x, y: "{} * {}".format(x, y), - lambda x: "{}^2".format(x), - lambda x, y, z: "{} * {} * {}".format(x, y, z), - lambda x, y: "{}^2 * {}".format(x, y), - lambda x: "{}^3".format(x), - ] - sindy_library = CustomLibrary( - library_functions=library_functions, function_names=library_function_names - ) - opt = TrappingSR3() - model = SINDy(optimizer=opt, feature_library=sindy_library) - model.fit(x) - check_is_fitted(model) - - @pytest.mark.parametrize( "error, optimizer, params", [ From 1b436cab9c416587a5575f5719f4273c9706ab17 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Mon, 8 Jan 2024 16:45:08 +0000 Subject: [PATCH 08/16] TST(trapping): remove unused quadratic_library fixutre --- test/conftest.py | 17 ----------------- test/test_optimizers.py | 2 +- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/test/conftest.py b/test/conftest.py index 5ea49e6b..1b89d1d9 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -315,23 +315,6 @@ def custom_library_bias(): ) -@pytest.fixture -def quadratic_library(): - library_functions = [ - lambda x: x, - lambda x, y: x * y, - lambda x: x**2, - ] - function_names = [ - lambda x: str(x), - lambda x, y: "{} * {}".format(x, y), 
- lambda x: "{}^2".format(x), - ] - return CustomLibrary( - library_functions=library_functions, function_names=function_names - ) - - @pytest.fixture def generalized_library(): tensor_array = [[1, 1]] diff --git a/test/test_optimizers.py b/test/test_optimizers.py index 0d7fc5ee..ef0e9cfd 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -473,7 +473,7 @@ def test_stable_linear_sr3_linear_library(): dict(thresholder="weighted_l2", thresholds=1e-5 * np.ones((1, 2))), ], ) -def test_trapping_sr3_quadratic_library(params, trapping_sr3_params, quadratic_library): +def test_trapping_sr3_quadratic_library(params, trapping_sr3_params): t = np.arange(0, 1, 0.1) x = np.exp(-t).reshape((-1, 1)) x_dot = -x From 7b2622be2d588113385ba253b8d09b444f044266 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Mon, 8 Jan 2024 17:31:52 +0000 Subject: [PATCH 09/16] feat(trapping): Create trapping constraints automatically Previously, TrappingSR3 could only use constraints passed to it, and even then only a limited set of constraints. It also didn't apply the trapping constraints automatically, because constraints were required at __init__, and actually shaping them requires knowledge about the number of features, typically not known until fit (unless the user is a developer who knows how the feature libraries work internally :wink:) WIP, Spawned issue #452 --- pysindy/optimizers/constrained_sr3.py | 4 +- pysindy/optimizers/trapping_sr3.py | 47 ++++++++++++++++---- test/conftest.py | 3 +- test/test_optimizers.py | 57 +++++++++++++++-------- 4 files changed, 84 insertions(+), 27 deletions(-) diff --git a/pysindy/optimizers/constrained_sr3.py b/pysindy/optimizers/constrained_sr3.py index dbc771a6..0760aba2 100644 --- a/pysindy/optimizers/constrained_sr3.py +++ b/pysindy/optimizers/constrained_sr3.py @@ -72,8 +72,8 @@ class ConstrainedSR3(SR3): constraint_lhs : numpy ndarray, optional (default None) Shape should be (n_constraints, n_features * n_targets), - The left hand side matrix C of Cw <= d. - There should be one row per constraint. + The left hand side matrix C of Cw <= d (or Cw = d for equality + constraints). There should be one row per constraint. constraint_rhs : numpy ndarray, shape (n_constraints,), optional (default None) The right hand side vector d of Cw <= d. diff --git a/pysindy/optimizers/trapping_sr3.py b/pysindy/optimizers/trapping_sr3.py index 3b250bd7..0183e8b7 100644 --- a/pysindy/optimizers/trapping_sr3.py +++ b/pysindy/optimizers/trapping_sr3.py @@ -71,10 +71,6 @@ class TrappingSR3(ConstrainedSR3): If relax_optim = True, use the relax-and-split method. If False, try a direct minimization on the largest eigenvalue. - inequality_constraints : bool, optional (default False) - If True, relax_optim must be false or relax_optim = True - AND threshold != 0, so that the CVXPY methods are used. - alpha_A : Determines the step size in the prox-gradient descent over A.
For convergence, need alpha_A <= eta, so default @@ -166,10 +162,11 @@ class TrappingSR3(ConstrainedSR3): def __init__( self, *, + _n_tgts: int = None, + _include_bias: bool = True, eta: Union[float, None] = None, eps_solver: float = 1e-7, relax_optim: bool = True, - inequality_constraints=False, alpha_A: Union[float, None] = None, alpha_m: Union[float, None] = None, gamma: float = -0.1, @@ -180,10 +177,46 @@ def __init__( A0: Union[NDArray, None] = None, **kwargs, ): - super().__init__(thresholder=thresholder, **kwargs) + # n_tgts, constraints, etc are data-dependent parameters and belong in + # _reduce/fit (). The following is a hack until we refactor how + # constraints are applied in ConstrainedSR3 and MIOSR + self._include_bias = _include_bias + self._n_tgts = _n_tgts + if _n_tgts is None: + warnings.warn( + "Trapping Optimizer initialized without _n_tgts. It will likely" + " be unable to fit data" + ) + _n_tgts = 1 + constraint_separation_index = kwargs.get("constraint_separation_index", 0) + constraint_rhs, constraint_lhs = _make_constraints( + _n_tgts, include_bias=_include_bias + ) + constraint_order = kwargs.get("constraint_order", "feature") + if constraint_order == "target": + constraint_lhs = np.reshape(np.transpose(constraint_lhs, [1, 0, 2])) + constraint_lhs = np.reshape(constraint_lhs, (constraint_lhs.shape[0], -1)) + try: + constraint_lhs = np.concatenate( + (kwargs.pop("constraint_lhs"), constraint_lhs), 0 + ) + constraint_rhs = np.concatenate( + (kwargs.pop("constraint_rhs"), constraint_rhs), 0 + ) + except KeyError: + pass + + super().__init__( + constraint_lhs=constraint_lhs, + constraint_rhs=constraint_rhs, + constraint_separation_index=constraint_separation_index, + constraint_order=constraint_order, + equality_constraints=True, + thresholder=thresholder, + **kwargs, + ) self.eps_solver = eps_solver self.relax_optim = relax_optim - self.inequality_constraints = inequality_constraints self.m0 = m0 self.A0 = A0 self.alpha_A = alpha_A diff --git a/test/conftest.py b/test/conftest.py index 1b89d1d9..c899b8af 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -410,7 +410,8 @@ def data_linear_oscillator_corrupted(): @pytest.fixture(scope="session") def data_linear_combination(): t = np.linspace(0, 5, 100) - x = np.stack((np.exp(t), np.sin(t), np.cos(t)), axis=-1) + lib = PolynomialLibrary(2) + x = lib.fit_transform(t) y = np.stack((x[:, 0] + x[:, 1], x[:, 1] + x[:, 2]), axis=-1) return x, y diff --git a/test/test_optimizers.py b/test/test_optimizers.py index ef0e9cfd..839e0548 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -4,6 +4,7 @@ import numpy as np import pytest from numpy.linalg import norm +from numpy.typing import NDArray from scipy.integrate import solve_ivp from sklearn.base import BaseEstimator from sklearn.exceptions import ConvergenceWarning @@ -18,6 +19,7 @@ from pysindy import SINDy from pysindy.feature_library import CustomLibrary from pysindy.feature_library import SINDyPILibrary +from pysindy.optimizers import BaseOptimizer from pysindy.optimizers import ConstrainedSR3 from pysindy.optimizers import EnsembleOptimizer from pysindy.optimizers import FROLS @@ -67,6 +69,18 @@ def predict(self, x): return x +def _align_optimizer_and_1dfeatures( + opt: BaseOptimizer, features: NDArray +) -> tuple[BaseOptimizer, NDArray]: + # This is a hack until constraints are moved from init to fit + if isinstance(opt, TrappingSR3): + opt = TrappingSR3(_n_tgts=1, _include_bias=True) + features = np.hstack([features, features, features]) + else: + 
features = features + return opt, features + + @pytest.mark.parametrize( "cls, support", [ @@ -100,17 +114,19 @@ def data(request): SR3(), ConstrainedSR3(), StableLinearSR3(), - TrappingSR3(), + TrappingSR3(_n_tgts=1), Lasso(fit_intercept=False), ElasticNet(fit_intercept=False), DummyLinearModel(), MIOSR(), ], + ids=lambda param: type(param), ) def test_fit(data_derivative_1d, optimizer): x, x_dot = data_derivative_1d if len(x.shape) == 1: x = x.reshape(-1, 1) + optimizer, x = _align_optimizer_and_1dfeatures(optimizer, x) opt = WrappedOptimizer(optimizer, unbias=False) opt.fit(x, x_dot) @@ -167,12 +183,12 @@ def test_alternate_parameters(data_derivative_1d, kwargs): ], ) def test_sample_weight_optimizers(data_1d, optimizer): - x, t = data_1d - + y, t = data_1d + opt = optimizer() + opt, x = _align_optimizer_and_1dfeatures(opt, y) sample_weight = np.ones(x[:, 0].shape) sample_weight[::2] = 0 - opt = optimizer() - opt.fit(x, x, sample_weight=sample_weight) + opt.fit(x, y, sample_weight=sample_weight) check_is_fitted(opt) @@ -222,12 +238,12 @@ def test_sr3_bad_parameters(optimizer, params): ) def test_trapping_bad_parameters(params): with pytest.raises(ValueError): - TrappingSR3(**params) + TrappingSR3(_n_tgts=1, **params) def test_trapping_objective_print(): # test error in verbose print logic when max_iter < 10 - opt = TrappingSR3(max_iter=2, verbose=True) + opt = TrappingSR3(_n_tgts=1, max_iter=2, verbose=True) arr = np.ones(1) opt._objective(arr, arr, arr, arr, arr, 1) @@ -481,7 +497,7 @@ def test_trapping_sr3_quadratic_library(params, trapping_sr3_params): params.update(trapping_sr3_params) - opt = TrappingSR3(**params) + opt = TrappingSR3(_n_tgts=1, _include_bias=False, **params) opt.fit(features, x_dot) assert opt.PL_.shape == (1, 1, 1, 2) assert opt.PQ_.shape == (1, 1, 1, 1, 2) @@ -494,7 +510,7 @@ def test_trapping_sr3_quadratic_library(params, trapping_sr3_params): params["constraint_rhs"] = np.zeros(p) params["constraint_lhs"] = np.eye(p, r * N) - opt = TrappingSR3(**params) + opt = TrappingSR3(_n_tgts=1, _include_bias=False, **params) opt.fit(features, x_dot) assert opt.PL_.shape == (1, 1, 1, 2) assert opt.PQ_.shape == (1, 1, 1, 1, 2) @@ -631,7 +647,7 @@ def test_constrained_sr3_prox_functions(data_derivative_1d, thresholder): (SR3, {"trimming_fraction": 0.1}), (ConstrainedSR3, {"constraint_lhs": [1], "constraint_rhs": [1]}), (ConstrainedSR3, {"trimming_fraction": 0.1}), - (TrappingSR3, {"constraint_lhs": [1], "constraint_rhs": [1]}), + (TrappingSR3, {"_n_tgts": 1, "constraint_lhs": [1], "constraint_rhs": [1]}), (StableLinearSR3, {"constraint_lhs": [1], "constraint_rhs": [1]}), (StableLinearSR3, {"trimming_fraction": 0.1}), (SINDyPI, {}), @@ -741,7 +757,7 @@ def test_sr3_enable_trimming(optimizer, data_linear_oscillator_corrupted): SR3(max_iter=1), ConstrainedSR3(max_iter=1), StableLinearSR3(max_iter=1), - TrappingSR3(max_iter=1), + TrappingSR3(_n_tgts=1, max_iter=1), ], ) def test_fit_warn(data_derivative_1d, optimizer): @@ -755,7 +771,11 @@ def test_fit_warn(data_derivative_1d, optimizer): @pytest.mark.parametrize( "optimizer", - [(ConstrainedSR3, {"max_iter": 80}), (TrappingSR3, {"max_iter": 100}), (MIOSR, {})], + [ + (ConstrainedSR3, {"max_iter": 80}), + (TrappingSR3, {"_n_tgts": 5, "max_iter": 100}), + (MIOSR, {}), + ], ) @pytest.mark.parametrize("target_value", [0, -1, 3]) def test_row_format_constraints(data_linear_combination, optimizer, target_value): @@ -966,6 +986,7 @@ def test_normalize_columns(data_derivative_1d, optimizer): if len(x.shape) == 1: x = x.reshape(-1, 1) 
opt = optimizer(normalize_columns=True) + opt, x = _align_optimizer_and_1dfeatures(opt, x) opt.fit(x, x_dot) check_is_fitted(opt) assert opt.complexity >= 0 @@ -1027,9 +1048,11 @@ def test_ssr_criteria(data_lorenz): ], ) def test_optimizers_verbose(data_1d, optimizer): - x, _ = data_1d + y, _ = data_1d opt = optimizer(verbose=True) - opt.fit(x, x) + opt, x = _align_optimizer_and_1dfeatures(opt, y) + opt.verbose = True + opt.fit(x, y) check_is_fitted(opt) @@ -1043,10 +1066,10 @@ def test_optimizers_verbose(data_1d, optimizer): ], ) def test_optimizers_verbose_cvxpy(data_1d, optimizer): - x, _ = data_1d - + y, _ = data_1d opt = optimizer(verbose_cvxpy=True) - opt.fit(x, x) + opt, x = _align_optimizer_and_1dfeatures(opt, y) + opt.fit(x, y) check_is_fitted(opt) From b2d010afb53e55239f92de66310227a52908ad6c Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Mon, 8 Jan 2024 20:57:46 +0000 Subject: [PATCH 10/16] BUG(axes): Make concat out param work --- pysindy/utils/axes.py | 7 +++++-- test/utils/test_axes.py | 7 +++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/pysindy/utils/axes.py b/pysindy/utils/axes.py index bad10d55..ad0f7904 100644 --- a/pysindy/utils/axes.py +++ b/pysindy/utils/axes.py @@ -117,13 +117,16 @@ def decorator(func): @implements(np.concatenate) -def concatenate(arrays, axis=0): +def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"): parents = [np.asarray(obj) for obj in arrays] ax_list = [obj.__dict__ for obj in arrays if isinstance(obj, AxesArray)] for ax1, ax2 in zip(ax_list[:-1], ax_list[1:]): if ax1 != ax2: raise TypeError("Concatenating >1 AxesArray with incompatible axes") - return AxesArray(np.concatenate(parents, axis), axes=ax_list[0]) + result = np.concatenate(parents, axis, out=out, dtype=dtype, casting=casting) + if isinstance(out, AxesArray): + out.__dict__ = ax_list[0] + return AxesArray(result, axes=ax_list[0]) def comprehend_axes(x): diff --git a/test/utils/test_axes.py b/test/utils/test_axes.py index b1a38e6f..e5d9a838 100644 --- a/test/utils/test_axes.py +++ b/test/utils/test_axes.py @@ -7,6 +7,13 @@ from pysindy import AxesArray +def test_concat_out(): + arr = AxesArray(np.arange(3).reshape(1, 3), {"ax_a": 0, "ax_b": 1}) + arr_out = np.empty((2, 3)).view(AxesArray) + result = np.concatenate((arr, arr), axis=0, out=arr_out) + assert_equal(result, arr_out) + + def test_reduce_mean_noinf_recursion(): arr = AxesArray(np.array([[1]]), {}) np.mean(arr, axis=0) From e6d69a26ce32b4973dc61e8c5748bfe94afbeec1 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Tue, 9 Jan 2024 16:13:07 +0000 Subject: [PATCH 11/16] TST: Fix shape of linear_combination (1d->2d) --- test/conftest.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/conftest.py b/test/conftest.py index c899b8af..8af932c7 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -409,10 +409,10 @@ def data_linear_oscillator_corrupted(): @pytest.fixture(scope="session") def data_linear_combination(): - t = np.linspace(0, 5, 100) - lib = PolynomialLibrary(2) - x = lib.fit_transform(t) - y = np.stack((x[:, 0] + x[:, 1], x[:, 1] + x[:, 2]), axis=-1) + t = np.linspace(0, 3, 100).reshape((-1, 1)) + lib = PolynomialLibrary(2, include_bias=False) + x = lib.fit_transform(np.hstack([t, -2 * t, 3 * t])) + y = np.stack((x[:, 0] + x[:, 1], x[:, 1] + x[:, 2], x[:, 0] + x[:, 2]), axis=-1) return x, y From f4a8f804756541c840d33956fb55f96fd244553f Mon Sep 17 00:00:00 
2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Wed, 10 Jan 2024 01:36:40 +0000 Subject: [PATCH 12/16] feat(utils): Deprecate target format constraints And simplify reorder_constraints --- pysindy/utils/base.py | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/pysindy/utils/base.py b/pysindy/utils/base.py index cc3a846a..114916e7 100644 --- a/pysindy/utils/base.py +++ b/pysindy/utils/base.py @@ -1,3 +1,4 @@ +import warnings from itertools import repeat from typing import Sequence @@ -136,24 +137,17 @@ def drop_nan_samples(x, y): return x, y -def reorder_constraints(c, n_features, output_order="row"): - """Reorder constraint matrix.""" - ret = c.copy() - - if ret.ndim == 1: - ret = ret.reshape(1, -1) - - n_targets = ret.shape[1] // n_features - shape = (n_targets, n_features) - - if output_order == "row": - for i in range(ret.shape[0]): - ret[i] = ret[i].reshape(shape).flatten(order="F") +def reorder_constraints(arr, n_features, output_order="feature"): + """Switch between 'feature' and 'target' constraint order.""" + warnings.warn("Target format constraints are deprecated.", stacklevel=2) + n_constraints = arr.shape[0] if arr.ndim > 1 else 1 + n_tgt = arr.size // n_features // n_constraints + if output_order == "feature": + starting_shape = (n_constraints, n_tgt, n_features) else: - for i in range(ret.shape[0]): - ret[i] = ret[i].reshape(shape, order="F").flatten() + starting_shape = (n_constraints, n_features, n_tgt) - return ret + return arr.reshape(starting_shape).transpose([0, 2, 1]).reshape((n_constraints, -1)) def prox_l0(x, threshold): From e0f723b017cd11e32a4b74d49d0b5bf8c5c7f1c5 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Wed, 10 Jan 2024 03:30:36 +0000 Subject: [PATCH 13/16] test(reformat_constraints): Make target and feature format clear Read the fucking diff backport-to: constraint-order --- test/utils/test_utils.py | 44 +++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/test/utils/test_utils.py b/test/utils/test_utils.py index 54578c43..4c4d2daf 100644 --- a/test/utils/test_utils.py +++ b/test/utils/test_utils.py @@ -7,28 +7,44 @@ def test_reorder_constraints_1D(): - target_order = np.arange(6) - row_order = np.array([0, 3, 1, 4, 2, 5]) n_feats = 3 - - np.testing.assert_array_equal( - reorder_constraints(target_order, n_feats).flatten(), row_order + n_tgts = 2 + target_order = np.array( + [f"t{i}f{j}" for i in range(n_tgts) for j in range(n_feats)] ) - np.testing.assert_array_equal( - reorder_constraints(row_order, n_feats, output_order="target").flatten(), - target_order, + feature_order = np.array( + [f"t{i}f{j}" for j in range(n_feats) for i in range(n_tgts)] ) + result = reorder_constraints(target_order, n_feats, output_order="feature") + np.testing.assert_array_equal(result.flatten(), feature_order) + + result = reorder_constraints(feature_order, n_feats, output_order="target") + np.testing.assert_array_equal(result.flatten(), target_order) + def test_reorder_constraints_2D(): - target_order = np.arange(12).reshape((2, 6)) - row_order = np.array([[0, 3, 1, 4, 2, 5], [6, 9, 7, 10, 8, 11]]) n_feats = 3 - - np.testing.assert_array_equal(reorder_constraints(target_order, n_feats), row_order) - np.testing.assert_array_equal( - reorder_constraints(row_order, n_feats, output_order="target"), target_order + n_tgts = 2 + n_const = 2 + target_order = np.array( + [ + [f"c{k}t{i}f{j}" for i in range(n_tgts) 
for j in range(n_feats)] + for k in range(n_const) + ] + ) + feature_order = np.array( + [ + [f"c{k}t{i}f{j}" for j in range(n_feats) for i in range(n_tgts)] + for k in range(n_const) + ] + ) + + result = reorder_constraints(target_order, n_feats, output_order="feature") + np.testing.assert_array_equal(result, feature_order) + + result = reorder_constraints(feature_order, n_feats, output_order="target") + np.testing.assert_array_equal(result, target_order) def test_validate_controls(): From c8339c0ccefbbfe12dc482ba39ac36d9d21650cf Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Wed, 10 Jan 2024 03:41:40 +0000 Subject: [PATCH 14/16] feat(trapping): Enable merging trapping and user constraints Also: * (constrained_sr3) clean up creating cvxpy constraints * (constrained_sr3) Clone the cvxpy problem in case of except statements, because prob.solve() mutates prob in a mathematically significant way
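The clone-before-solve pattern, sketched on a toy problem (made-up data; solver choices are illustrative):

import numpy as np
import cvxpy as cp
from copy import deepcopy

xi = cp.Variable(2)
cost = cp.sum_squares(np.eye(2) @ xi - np.ones(2))
prob = cp.Problem(cp.Minimize(cost), [xi >= 0])
prob_clone = deepcopy(prob)  # solve() mutates prob, so keep a pristine copy
try:
    prob.solve()
except cp.error.SolverError:
    prob = prob_clone  # retry from the unmutated problem
    prob.solve(solver=cp.ECOS)
    xi = prob.variables()[0]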
--- pysindy/optimizers/constrained_sr3.py | 63 +++++++++++++++------------ pysindy/optimizers/trapping_sr3.py | 33 ++++++++++---- test/test_optimizers.py | 54 +++++++++++++++-------- 3 files changed, 95 insertions(+), 55 deletions(-) diff --git a/pysindy/optimizers/constrained_sr3.py b/pysindy/optimizers/constrained_sr3.py index 0760aba2..300bce89 100644 --- a/pysindy/optimizers/constrained_sr3.py +++ b/pysindy/optimizers/constrained_sr3.py @@ -1,4 +1,6 @@ import warnings +from copy import deepcopy +from typing import Optional from typing import Tuple try: @@ -168,7 +170,7 @@ def __init__( thresholds=None, equality_constraints=False, inequality_constraints=False, - constraint_separation_index=0, + constraint_separation_index: Optional[int] = None, verbose=False, verbose_cvxpy=False, unbias=False, @@ -194,7 +196,7 @@ def __init__( self.constraint_lhs = constraint_lhs self.constraint_rhs = constraint_rhs self.constraint_order = constraint_order - self.use_constraints = (constraint_lhs is not None) and ( + self.use_constraints = (constraint_lhs is not None) or ( constraint_rhs is not None ) @@ -208,7 +210,7 @@ def __init__( " but user did not specify if the constraints were equality or" " inequality constraints. Assuming equality constraints." ) - self.equality_constraints = True + equality_constraints = True if self.use_constraints: if constraint_order not in ("feature", "target"): @@ -243,6 +245,16 @@ def __init__( ) self.inequality_constraints = inequality_constraints self.equality_constraints = equality_constraints + if self.use_constraints and constraint_separation_index is None: + if self.inequality_constraints and not self.equality_constraints: + constraint_separation_index = len(constraint_lhs) + elif self.equality_constraints and not self.inequality_constraints: + constraint_separation_index = 0 + else: + raise ValueError( + "If passing both inequality and equality constraints, must specify" + " constraint_separation_index." + ) self.constraint_separation_index = constraint_separation_index def _update_full_coef_constraints(self, H, x_transpose_y, coef_sparse): @@ -276,30 +288,20 @@ def _create_var_and_part_cost( def _update_coef_cvxpy(self, xi, cost, var_len, coef_prev, tol): if self.use_constraints: - if self.inequality_constraints and self.equality_constraints: - # Process inequality constraints then equality constraints - prob = cp.Problem( - cp.Minimize(cost), - [ - self.constraint_lhs[: self.constraint_separation_index, :] @ xi - <= self.constraint_rhs[: self.constraint_separation_index], - self.constraint_lhs[self.constraint_separation_index :, :] @ xi - == self.constraint_rhs[self.constraint_separation_index :], - ], + constraints = [] + if self.equality_constraints: + constraints.append( + self.constraint_lhs[self.constraint_separation_index :, :] @ xi + == self.constraint_rhs[self.constraint_separation_index :], ) - elif self.inequality_constraints: - prob = cp.Problem( - cp.Minimize(cost), - [self.constraint_lhs @ xi <= self.constraint_rhs], - ) - else: - prob = cp.Problem( - cp.Minimize(cost), - [self.constraint_lhs @ xi == self.constraint_rhs], + if self.inequality_constraints: + constraints.append( + self.constraint_lhs[: self.constraint_separation_index, :] @ xi + <= self.constraint_rhs[: self.constraint_separation_index] ) - else: - prob = cp.Problem(cp.Minimize(cost)) + prob = cp.Problem(cp.Minimize(cost), constraints) + prob_clone = deepcopy(prob) # default solver is SCS/OSQP here but switches to ECOS for L2 try: prob.solve( @@ -313,13 +315,20 @@ # similar semantic changes for the other variables. except (TypeError, ValueError): try: + prob = prob_clone prob.solve(max_iters=self.max_iter, verbose=self.verbose_cvxpy) + xi = prob.variables()[0] except cp.error.SolverError: - print("Solver failed, setting coefs to zeros") + warnings.warn("Solver failed, setting coefs to zeros") xi.value = np.zeros(var_len) except cp.error.SolverError: + try: + prob = prob_clone + prob.solve(max_iter=self.max_iter, verbose=self.verbose_cvxpy) + xi = prob.variables()[0] + except cp.error.SolverError: - print("Solver failed, setting coefs to zeros") - xi.value = np.zeros(var_len) + warnings.warn("Solver failed, setting coefs to zeros") + xi.value = np.zeros(var_len) if xi.value is None: warnings.warn( diff --git a/pysindy/optimizers/trapping_sr3.py b/pysindy/optimizers/trapping_sr3.py index 0183e8b7..415ce88c 100644 --- a/pysindy/optimizers/trapping_sr3.py +++ b/pysindy/optimizers/trapping_sr3.py @@ -163,7 +163,8 @@ def __init__( self, *, _n_tgts: int = None, - _include_bias: bool = True, + _include_bias: bool = False, + _interaction_only: bool = False, eta: Union[float, None] = None, eps_solver: float = 1e-7, relax_optim: bool = True, @@ -181,6 +182,7 @@ def __init__( # _reduce/fit ().
The following is a hack until we refactor how # constraints are applied in ConstrainedSR3 and MIOSR self._include_bias = _include_bias + self._interaction_only = _interaction_only self._n_tgts = _n_tgts if _n_tgts is None: warnings.warn( @@ -188,13 +190,22 @@ def __init__( " be unable to fit data" ) _n_tgts = 1 - constraint_separation_index = kwargs.get("constraint_separation_index", 0) + if _include_bias: + raise ValueError( + "Currently not able to include bias until PQ matrices are modified" + ) + if "constraint_separation_index" in kwargs: + constraint_separation_index = kwargs.pop("constraint_separation_index") + elif kwargs.get("inequality_constraints", False): + constraint_separation_index = kwargs["constraint_lhs"].shape[0] + else: + constraint_separation_index = 0 constraint_rhs, constraint_lhs = _make_constraints( _n_tgts, include_bias=_include_bias ) - constraint_order = kwargs.get("constraint_order", "feature") + constraint_order = kwargs.pop("constraint_order", "feature") if constraint_order == "target": - constraint_lhs = np.reshape(np.transpose(constraint_lhs, [1, 0, 2])) + constraint_lhs = np.transpose(constraint_lhs, [0, 2, 1]) constraint_lhs = np.reshape(constraint_lhs, (constraint_lhs.shape[0], -1)) try: constraint_lhs = np.concatenate( @@ -460,22 +471,26 @@ def _reduce(self, x, y): self.PW_history_ = [] self.PWeigs_history_ = [] self.history_ = [] - n_samples, n_features = x.shape - n_tgts = y.shape[1] + n_samples, n_tgts = y.shape + n_features = n_poly_features( + n_tgts, + 2, + include_bias=self._include_bias, + interaction_only=self._interaction_only, + ) var_len = n_features * n_tgts - n_feat_expected = int((n_tgts**2 + 3 * n_tgts) / 2.0) # Only relevant if the stability term is turned on. self.PL_unsym_, self.PL_, self.PQ_ = self._set_Ptensors(n_tgts) # make sure dimensions/symmetries are correct self.PL_, self.PQ_ = self._check_P_matrix( - n_tgts, n_features, n_feat_expected, self.PL_, self.PQ_ + n_tgts, n_features, n_features, self.PL_, self.PQ_ ) # Set initial coefficients if self.use_constraints and self.constraint_order.lower() == "target": self.constraint_lhs = reorder_constraints( - self.constraint_lhs, n_features, output_order="target" + self.constraint_lhs, n_features, output_order="feature" ) coef_sparse = self.coef_.T diff --git a/test/test_optimizers.py b/test/test_optimizers.py index 839e0548..270e51d4 100644 --- a/test/test_optimizers.py +++ b/test/test_optimizers.py @@ -74,8 +74,8 @@ def _align_optimizer_and_1dfeatures( ) -> tuple[BaseOptimizer, NDArray]: # This is a hack until constraints are moved from init to fit if isinstance(opt, TrappingSR3): - opt = TrappingSR3(_n_tgts=1, _include_bias=True) - features = np.hstack([features, features, features]) + opt = TrappingSR3(_n_tgts=1, _include_bias=False) + features = np.hstack([features, features]) else: features = features return opt, features @@ -435,6 +435,7 @@ def test_constrained_sr3_quadratic_library(params): dict(thresholder="l2", threshold=1, expected=1.5), dict(thresholder="weighted_l2", thresholds=np.ones((4, 1)), expected=2.5), ], + ids=lambda d: d["thresholder"], ) def test_stable_linear_sr3_cost_function(params): expected = params.pop("expected") @@ -773,21 +774,24 @@ def test_fit_warn(data_derivative_1d, optimizer): "optimizer", [ (ConstrainedSR3, {"max_iter": 80}), - (TrappingSR3, {"_n_tgts": 5, "max_iter": 100}), + (TrappingSR3, {"_n_tgts": 3, "max_iter": 100, "eps_solver": 1e-5}), (MIOSR, {}), ], + ids=lambda param: param[0].__name__ + " " + ",".join([key for key in param[1]]), )
@pytest.mark.parametrize("target_value", [0, -1, 3]) -def test_row_format_constraints(data_linear_combination, optimizer, target_value): +def test_feature_format_constraints(data_linear_combination, optimizer, target_value): # Solution is x_dot = x.dot(np.array([[1, 1, 0], [0, 1, 1]])) - x, x_dot = data_linear_combination + x, y = data_linear_combination constraint_rhs = target_value * np.ones(2) - constraint_lhs = np.zeros((2, x.shape[1] * x_dot.shape[1])) + constraint_lhs = np.zeros((2, x.shape[1], y.shape[1])) # Should force corresponding entries of coef_ to be target_value - constraint_lhs[0, 0] = 1 - constraint_lhs[1, 3] = 1 + constraint_lhs[0, 1, 1] = 1 + constraint_lhs[1, 2, 2] = 1 + # reshape to "feature" order + constraint_lhs = np.reshape(constraint_lhs, (constraint_lhs.shape[0], -1)) model = optimizer[0]( constraint_lhs=constraint_lhs, @@ -795,10 +799,10 @@ def test_row_format_constraints(data_linear_combination, optimizer, target_value constraint_order="feature", **optimizer[1], ) - model.fit(x, x_dot) + model.fit(x, y) np.testing.assert_allclose( - np.array([model.coef_[0, 0], model.coef_[1, 1]]), target_value, atol=1e-8 + np.array([model.coef_[1, 1], model.coef_[2, 2]]), target_value, atol=1e-7 ) @@ -807,26 +811,37 @@ def test_row_format_constraints(data_linear_combination, optimizer, target_value [ (ConstrainedSR3, {"max_iter": 80}), (StableLinearSR3, {}), - (TrappingSR3, {"max_iter": 100}), + (TrappingSR3, {"_n_tgts": 3, "max_iter": 200, "eps_solver": 1e-5}), (MIOSR, {}), ], + ids=lambda param: param[0].__name__ + " " + ",".join([key for key in param[1]]), ) @pytest.mark.parametrize("target_value", [0, -1, 3]) def test_target_format_constraints(data_linear_combination, optimizer, target_value): - x, x_dot = data_linear_combination + x, y = data_linear_combination constraint_rhs = target_value * np.ones(2) - constraint_lhs = np.zeros((2, x.shape[1] * x_dot.shape[1])) + constraint_lhs = np.zeros((2, x.shape[1], y.shape[1])) # Should force corresponding entries of coef_ to be target_value - constraint_lhs[0, 1] = 1 - constraint_lhs[1, 4] = 1 + constraint_lhs[0, 2, 1] = 1 + constraint_lhs[1, 1, 2] = 1 + # reshape to "target" order + constraint_lhs = np.reshape( + np.transpose(constraint_lhs, [0, 2, 1]), (constraint_lhs.shape[0], -1) + ) model = optimizer[0]( - constraint_lhs=constraint_lhs, constraint_rhs=constraint_rhs, **optimizer[1] + constraint_lhs=constraint_lhs, + constraint_rhs=constraint_rhs, + constraint_order="target", + **optimizer[1], + ) + model.fit(x, y) + + np.testing.assert_allclose( + np.array([model.coef_[1, 2], model.coef_[2, 1]]), target_value, atol=1e-7 ) - model.fit(x, x_dot) - np.testing.assert_allclose(model.coef_[:, 1], target_value, atol=1e-8) @pytest.mark.parametrize( @@ -876,10 +891,11 @@ def test_constrained_inequality_constraints(data_lorenz, params): thresholder="weighted_l2", thresholds=0.5 * np.ones((1, 2)), expected=0.75 ), ], + ids=lambda d: d["thresholder"], ) def test_trapping_cost_function(params): expected = params.pop("expected") - opt = TrappingSR3(inequality_constraints=True, relax_optim=True, **params) + opt = TrappingSR3(relax_optim=True, **params) x = np.eye(2) y = np.ones(2) xi, cost = opt._create_var_and_part_cost(2, x, y) From a88a1f2cbb40586ef6486a6deb83c8bc70c8d69a Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:08:24 -0700 Subject: [PATCH 15/16] CLN: Fix black/flake8 minor --- examples/16_noise_robustness/utils.py | 1 - 
examples/17_parameterized_pattern_formation/utils.py | 8 ++++---- pysindy/differentiation/finite_difference.py | 1 - pysindy/feature_library/polynomial_library.py | 3 ++- pysindy/feature_library/sindy_pi_library.py | 1 - pysindy/feature_library/weak_pde_library.py | 3 --- pysindy/optimizers/sbr.py | 1 - pysindy/optimizers/trapping_sr3.py | 1 - pysindy/utils/axes.py | 2 -- pysindy/utils/odes.py | 1 + test/conftest.py | 7 ------- test/utils/test_axes.py | 7 ------- 12 files changed, 7 insertions(+), 29 deletions(-) diff --git a/examples/16_noise_robustness/utils.py b/examples/16_noise_robustness/utils.py index f81a89fb..8b51b52c 100644 --- a/examples/16_noise_robustness/utils.py +++ b/examples/16_noise_robustness/utils.py @@ -169,7 +169,6 @@ def make_test_trajectories( all_t_test = dict() for i, equation_name in enumerate(systems_list): - dimension = all_properties[equation_name]["embedding_dimension"] all_sols_test[equation_name] = np.zeros((n, n_trajectories, dimension)) all_t_test[equation_name] = np.zeros((n, n_trajectories)) diff --git a/examples/17_parameterized_pattern_formation/utils.py b/examples/17_parameterized_pattern_formation/utils.py index 91edcae6..c25778c1 100644 --- a/examples/17_parameterized_pattern_formation/utils.py +++ b/examples/17_parameterized_pattern_formation/utils.py @@ -38,7 +38,7 @@ def get_lorenz_trajectories(sigmas, rhos, betas, dt): x0_train, args=(sigmas[i], betas[i], rhos[i]), t_eval=t_train, - **integrator_keywords + **integrator_keywords, ).y.T x_trains = x_trains + [x_train] t_trains = t_trains + [t_train] @@ -966,7 +966,7 @@ def oregonator( args=(b,), y0=ut, first_step=dt, - **integrator_keywords + **integrator_keywords, ) if not usol.success: print(usol.message) @@ -1164,7 +1164,7 @@ def oregonator_homogeneous( args=(b,), y0=ut, first_step=dt, - **integrator_keywords + **integrator_keywords, ) dt = np.diff(usol.t)[-1] ut = usol.y[:, -1] @@ -1267,7 +1267,7 @@ def cgle_homogeneous(t, u): (t[i], t[i + 1]), y0=ut, first_step=dt, - **integrator_keywords + **integrator_keywords, ) # print(usol.message,end='\r') dt = np.diff(usol.t)[-1] diff --git a/pysindy/differentiation/finite_difference.py b/pysindy/differentiation/finite_difference.py index 0c44917e..25690ddc 100644 --- a/pysindy/differentiation/finite_difference.py +++ b/pysindy/differentiation/finite_difference.py @@ -66,7 +66,6 @@ def __init__( drop_endpoints=False, periodic=False, ): - if order <= 0 or not isinstance(order, int): raise ValueError("order must be a positive int") if d <= 0: diff --git a/pysindy/feature_library/polynomial_library.py b/pysindy/feature_library/polynomial_library.py index 817e6a96..21cc126a 100644 --- a/pysindy/feature_library/polynomial_library.py +++ b/pysindy/feature_library/polynomial_library.py @@ -1,6 +1,7 @@ from itertools import chain from math import comb from typing import Iterator +from typing import Tuple import numpy as np from numpy.typing import NDArray from scipy import sparse @@ -80,7 +81,7 @@ def _combinations( include_interaction: bool, interaction_only: bool, include_bias: bool, - ) -> Iterator[tuple[int, ...]]: + ) -> Iterator[Tuple[int, ...]]: """ Create selection tuples of input indexes for each polynomial term diff --git a/pysindy/feature_library/sindy_pi_library.py b/pysindy/feature_library/sindy_pi_library.py index f45cf567..407c67de 100644 --- a/pysindy/feature_library/sindy_pi_library.py +++ b/pysindy/feature_library/sindy_pi_library.py @@ -393,7 +393,6 @@ def transform(self, x_full): f_dot.__code__.co_argcount, self.interaction_only, ): - for i, f in
enumerate(self.x_functions): for f_combs in self._combinations( n_input_features, diff --git a/pysindy/feature_library/weak_pde_library.py b/pysindy/feature_library/weak_pde_library.py index 0f41ede8..edbdce40 100644 --- a/pysindy/feature_library/weak_pde_library.py +++ b/pysindy/feature_library/weak_pde_library.py @@ -457,7 +457,6 @@ def _set_up_weights(self): deriv = np.zeros(self.grid_ndim) deriv[-1] = 1 for k in range(self.K): - ret = np.ones(shapes_k[k]) for i in range(self.grid_ndim): s = [0] * (self.grid_ndim + 1) @@ -476,7 +475,6 @@ def _set_up_weights(self): # Product weights over the axes for pure derivative terms, shaped as inds_k self.fullweights0 = [] for k in range(self.K): - ret = np.ones(shapes_k[k]) for i in range(self.grid_ndim): s = [0] * (self.grid_ndim + 1) @@ -922,7 +920,6 @@ def transform(self, x_full): for deriv in derivs[ np.where(np.all(derivs <= derivs_mixed, axis=1))[0] ]: - # indices for product rule terms j0 = np.where(np.all(derivs == deriv, axis=1))[0][0] j1 = np.where( diff --git a/pysindy/optimizers/sbr.py b/pysindy/optimizers/sbr.py index f06467b4..2e304a34 100644 --- a/pysindy/optimizers/sbr.py +++ b/pysindy/optimizers/sbr.py @@ -99,7 +99,6 @@ def __init__( unbias: bool = False, **kwargs, ): - if unbias: raise ValueError("SBR is incompatible with unbiasing. Set unbias=False") diff --git a/pysindy/optimizers/trapping_sr3.py b/pysindy/optimizers/trapping_sr3.py index 415ce88c..973267a2 100644 --- a/pysindy/optimizers/trapping_sr3.py +++ b/pysindy/optimizers/trapping_sr3.py @@ -539,7 +539,6 @@ def _reduce(self, x, y): # Begin optimization loop self.objective_history_ = [] for k in range(self.max_iter): - # update P tensor from the newest trap center mPQ = np.tensordot(trap_ctr, self.PQ_, axes=([0], [0])) p = self.PL_ - mPQ diff --git a/pysindy/utils/axes.py b/pysindy/utils/axes.py index 7de086db..25c42db8 100644 --- a/pysindy/utils/axes.py +++ b/pysindy/utils/axes.py @@ -430,7 +430,6 @@ def decorator(func): return decorator - @_implements(np.ravel) def ravel(a, order="C"): out = np.ravel(np.asarray(a), order=order) @@ -455,7 +454,6 @@ def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"): ax_list = [obj.axes for obj in arrays if isinstance(obj, AxesArray)] for ax1, ax2 in zip(ax_list[:-1], ax_list[1:]): if ax1 != ax2: - raise ValueError("Concatenating >1 AxesArray with incompatible axes") result = np.concatenate(parents, axis, out=out, dtype=dtype, casting=casting) if isinstance(out, AxesArray): diff --git a/pysindy/utils/odes.py b/pysindy/utils/odes.py index eb2b93ea..fd2be0bb 100644 --- a/pysindy/utils/odes.py +++ b/pysindy/utils/odes.py @@ -334,6 +334,7 @@ def burgers_galerkin(sigma=0.1, nu=0.025, U=1.0): # Below this line are models only suitable for SINDy-PI, since they are implicit # # and therefore require a library Theta(X, Xdot) rather than just Theta(X) # + # Michaelis–Menten model for enzyme kinetics def enzyme(t, x, jx=0.6, Vmax=1.5, Km=0.3): return jx - Vmax * x / (Km + x) diff --git a/test/conftest.py b/test/conftest.py index 2c4f0831..59fb61f7 100644 --- a/test/conftest.py +++ b/test/conftest.py @@ -60,7 +60,6 @@ def data_1d_bad_shape(): @pytest.fixture(scope="session") def data_lorenz(): - t = np.linspace(0, 1, 12) x0 = [8, 27, -7] x = solve_ivp(lorenz, (t[0], t[-1]), x0, t_eval=t).y.T @@ -70,7 +69,6 @@ def data_lorenz(): @pytest.fixture def data_multiple_trajectories(): - n_points = [100, 200, 500] initial_conditions = [ [8, 27, -7], @@ -120,7 +118,6 @@ def diffuse(t, u, dx, nx): @pytest.fixture(scope="session") def 
data_discrete_time(): - n_steps = 100 mu = 3.6 x = np.zeros((n_steps)) @@ -133,7 +130,6 @@ def data_discrete_time(): @pytest.fixture(scope="session") def data_discrete_time_multiple_trajectories(): - n_steps = 100 mus = [1, 2.3, 3.6] x = [np.zeros((n_steps)) for mu in mus] @@ -421,7 +417,6 @@ def u_fun(t): @pytest.fixture(scope="session") def data_discrete_time_c(): - n_steps = 100 mu = 3.6 @@ -437,7 +432,6 @@ def data_discrete_time_c(): @pytest.fixture(scope="session") def data_discrete_time_c_multivariable(): - n_steps = 100 mu = 3.6 @@ -454,7 +448,6 @@ def data_discrete_time_c_multivariable(): @pytest.fixture(scope="session") def data_discrete_time_multiple_trajectories_c(): - n_steps = 100 mus = [1, 2.3, 3.6] u = [0.001 * np.random.randn(n_steps) for mu in mus] diff --git a/test/utils/test_axes.py b/test/utils/test_axes.py index f3302cb6..6e85cd38 100644 --- a/test/utils/test_axes.py +++ b/test/utils/test_axes.py @@ -29,13 +29,6 @@ def test_bad_concat(): np.concatenate((arr, arr2), axis=0) -def test_concat_out(): - arr = AxesArray(np.arange(3).reshape(1, 3), {"ax_a": 0, "ax_b": 1}) - arr_out = np.empty((2, 3)).view(AxesArray) - result = np.concatenate((arr, arr), axis=0, out=arr_out) - assert_equal(result, arr_out) - - def test_reduce_mean_noinf_recursion(): arr = AxesArray(np.array([[1]]), {"ax_a": [0, 1]}) np.mean(arr, axis=0) From cb1ab378fade187c49fe3fbeeac3117db8e27875 Mon Sep 17 00:00:00 2001 From: Jake <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Date: Tue, 16 Apr 2024 13:33:09 -0700 Subject: [PATCH 16/16] CI: bump CI versions --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 350f1689..68385b63 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -29,7 +29,7 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: ["3.8", "3.10"] + python-version: ["3.9", "3.11"] steps: - uses: actions/checkout@v3
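With the series applied, trapping constraints are generated and merged automatically. A rough usage sketch of the resulting API (the leading-underscore arguments are the temporary hack noted in patch 09; data and solver settings here are illustrative):

import numpy as np
import pysindy as ps
from scipy.integrate import solve_ivp
from pysindy.utils.odes import lorenz

t = np.linspace(0, 10, 2000)
x = solve_ivp(lorenz, (t[0], t[-1]), [-8, 8, 27], t_eval=t).y.T
opt = ps.TrappingSR3(_n_tgts=3, _include_bias=False, max_iter=100)
lib = ps.PolynomialLibrary(degree=2, include_bias=False)
model = ps.SINDy(optimizer=opt, feature_library=lib)
model.fit(x, t=t)  # trapping constraints are built internally from _n_tgts
model.print()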