Merge pull request #435 from dynamicslab/trapping-helper
Make TrappingSR3 a subclass of ConstrainedSR3
Jacob-Stevens-Haas committed Apr 14, 2024
2 parents 098d231 + 639ea9d commit 80171b7
Showing 8 changed files with 264 additions and 497 deletions.
19 changes: 13 additions & 6 deletions pysindy/optimizers/base.py
@@ -97,17 +97,24 @@ def __init__(
unbias: bool = True,
):
super().__init__(fit_intercept=False, copy_X=copy_X)

if max_iter <= 0:
raise ValueError("max_iter must be positive")

self.max_iter = max_iter
self.iters = 0
if np.ndim(initial_guess) == 1:
initial_guess = initial_guess.reshape(1, -1)
self.initial_guess = initial_guess
self.normalize_columns = normalize_columns
self.unbias = unbias
self.__post_init_guard()

# See name mangling rules for double underscore rationale
def __post_init_guard(self):
"""Conduct initialization post-init, as required by scikitlearn API."""
if np.ndim(self.initial_guess) == 1:
self.initial_guess = self.initial_guess.reshape(1, -1)
if self.max_iter <= 0:
raise ValueError("max_iter must be positive")

def set_params(self, **kwargs):
super().set_params(**kwargs)
self.__post_init_guard()

# Force subclasses to implement this
@abc.abstractmethod
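
For context, a minimal sketch (not pysindy code) of the guard pattern introduced here: scikit-learn's clone()/set_params() mutate attributes without re-running __init__, so validation is factored into a name-mangled helper called from both paths. GuardedEstimator and its parameter are hypothetical.

from sklearn.base import BaseEstimator

class GuardedEstimator(BaseEstimator):
    """Hypothetical estimator using the same post-init guard pattern."""

    def __init__(self, max_iter=20):
        self.max_iter = max_iter
        self.__post_init_guard()

    # Double underscore: name mangling keeps subclasses from shadowing this
    def __post_init_guard(self):
        if self.max_iter <= 0:
            raise ValueError("max_iter must be positive")

    def set_params(self, **kwargs):
        super().set_params(**kwargs)
        self.__post_init_guard()
        return self

GuardedEstimator().set_params(max_iter=-1)  # raises ValueError, as __init__ would
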
58 changes: 38 additions & 20 deletions pysindy/optimizers/constrained_sr3.py
@@ -1,4 +1,5 @@
import warnings
from typing import Tuple

try:
import cvxpy as cp
@@ -39,6 +40,9 @@ class ConstrainedSR3(SR3):
to learn parsimonious physics-informed models from data."
IEEE Access 8 (2020): 169259-169271.
Zheng, Peng, et al. "A unified framework for sparse relaxed
regularized regression: Sr3." IEEE Access 7 (2018): 1404-1423.
Parameters
----------
threshold : float, optional (default 0.1)
@@ -93,9 +97,6 @@ class ConstrainedSR3(SR3):
is deprecated in sklearn versions >= 1.0 and will be removed. Note that
this parameter is incompatible with the constraints!
copy_X : boolean, optional (default True)
If True, X will be copied; else, it may be overwritten.
initial_guess : np.ndarray, optional (default None)
Shape should be (n_features) or (n_targets, n_features).
Initial guess for coefficients ``coef_``, (v in the mathematical equations)
@@ -138,6 +139,15 @@ class ConstrainedSR3(SR3):
Weight vector(s) that are not subjected to the regularization.
This is the w in the objective function.
history_ : list
History of sparse coefficients. ``history_[k]`` contains the
sparse coefficients (v in the optimization objective function)
at iteration k.
objective_history_ : list
History of the value of the objective at each step. Note that
the trapping SINDy problem is nonconvex, so this value may both
increase and decrease as the algorithm iterates.
"""

def __init__(
@@ -249,17 +259,22 @@ def _update_full_coef_constraints(self, H, x_transpose_y, coef_sparse):
rhs = rhs.reshape(g.shape)
return inv1.dot(rhs)

def _update_coef_cvxpy(self, x, y, coef_sparse):
xi = cp.Variable(coef_sparse.shape[0] * coef_sparse.shape[1])
cost = cp.sum_squares(x @ xi - y.flatten())
def _create_var_and_part_cost(
self, var_len: int, x_expanded: np.ndarray, y: np.ndarray
) -> Tuple[cp.Variable, cp.Expression]:
xi = cp.Variable(var_len)
cost = cp.sum_squares(x_expanded @ xi - y.flatten())
if self.thresholder.lower() == "l1":
cost = cost + self.threshold * cp.norm1(xi)
elif self.thresholder.lower() == "weighted_l1":
cost = cost + cp.norm1(np.ravel(self.thresholds) @ xi)
elif self.thresholder.lower() == "l2":
cost = cost + self.threshold * cp.norm2(xi)
cost = cost + self.threshold * cp.norm2(xi) ** 2
elif self.thresholder.lower() == "weighted_l2":
cost = cost + cp.norm2(np.ravel(self.thresholds) @ xi)
cost = cost + cp.norm2(np.ravel(self.thresholds) @ xi) ** 2
return xi, cost
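
The split into _create_var_and_part_cost lets callers share the data-fidelity and regularization terms while attaching their own constraints before solving. A hedged, self-contained sketch of that usage with random data and an illustrative constraint (not the pysindy call path):

import cvxpy as cp
import numpy as np

rng = np.random.default_rng(0)
x_expanded = rng.standard_normal((30, 6))  # stand-in for the expanded library
y = rng.standard_normal((30, 1))
threshold = 0.1

# Mirrors the "l1" branch: least-squares fidelity plus an l1 penalty
xi = cp.Variable(6)
cost = cp.sum_squares(x_expanded @ xi - y.flatten())
cost = cost + threshold * cp.norm1(xi)

# A caller (e.g. a subclass) can now bolt on its own constraints
prob = cp.Problem(cp.Minimize(cost), [xi >= 0])  # illustrative constraint
prob.solve()
print(xi.value)
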

def _update_coef_cvxpy(self, xi, cost, var_len, coef_prev, tol):
if self.use_constraints:
if self.inequality_constraints and self.equality_constraints:
# Process inequality constraints then equality constraints
@@ -285,26 +300,26 @@ def _update_coef_cvxpy(self, x, y, coef_sparse):
else:
prob = cp.Problem(cp.Minimize(cost))

# default solver is OSQP here but switches to ECOS for L2
# default solver is SCS/OSQP here but switches to ECOS for L2
try:
prob.solve(
max_iter=self.max_iter,
eps_abs=self.tol,
eps_rel=self.tol,
eps_abs=tol,
eps_rel=tol,
verbose=self.verbose_cvxpy,
)
# Annoying error coming from L2 norm switching to use the ECOS
# solver, which uses "max_iters" instead of "max_iter", and
# similar semantic changes for the other variables.
except TypeError:
except (TypeError, ValueError):
try:
prob.solve(abstol=self.tol, reltol=self.tol, verbose=self.verbose_cvxpy)
prob.solve(max_iters=self.max_iter, verbose=self.verbose_cvxpy)
except cp.error.SolverError:
print("Solver failed, setting coefs to zeros")
xi.value = np.zeros(coef_sparse.shape[0] * coef_sparse.shape[1])
xi.value = np.zeros(var_len)
except cp.error.SolverError:
print("Solver failed, setting coefs to zeros")
xi.value = np.zeros(coef_sparse.shape[0] * coef_sparse.shape[1])
xi.value = np.zeros(var_len)

if xi.value is None:
warnings.warn(
@@ -313,7 +328,7 @@ def _update_coef_cvxpy(self, x, y, coef_sparse):
ConvergenceWarning,
)
return None
coef_new = (xi.value).reshape(coef_sparse.shape)
coef_new = (xi.value).reshape(coef_prev.shape)
return coef_new
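
The except ladder above exists because CVXPY solvers disagree on keyword names. A sketch of the same fallback structure, factored into a hypothetical helper (solve_with_fallback is not part of this PR):

import cvxpy as cp
import numpy as np

def solve_with_fallback(prob, xi, var_len, max_iter, tol, verbose=False):
    """Hypothetical helper mirroring the branch structure above."""
    try:
        # OSQP/SCS-style options: max_iter, eps_abs, eps_rel
        prob.solve(max_iter=max_iter, eps_abs=tol, eps_rel=tol, verbose=verbose)
    except (TypeError, ValueError):
        try:
            # ECOS spells the iteration cap "max_iters"
            prob.solve(max_iters=max_iter, verbose=verbose)
        except cp.error.SolverError:
            xi.value = np.zeros(var_len)
    except cp.error.SolverError:
        xi.value = np.zeros(var_len)
    return xi.value
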

def _update_sparse_coef(self, coef_full):
@@ -420,7 +435,11 @@ def _reduce(self, x, y):

objective_history = []
if self.inequality_constraints:
coef_sparse = self._update_coef_cvxpy(x_expanded, y, coef_sparse)
var_len = coef_sparse.shape[0] * coef_sparse.shape[1]
xi, cost = self._create_var_and_part_cost(var_len, x_expanded, y)
coef_sparse = self._update_coef_cvxpy(
xi, cost, var_len, coef_sparse, self.tol
)
objective_history.append(self._objective(x, y, 0, coef_full, coef_sparse))
else:
for k in range(self.max_iter):
@@ -459,9 +478,8 @@ def _reduce(self, x, y):
break
else:
warnings.warn(
"SR3._reduce did not converge after {} iterations.".format(
self.max_iter
),
f"ConstrainedSR3 did not converge after {self.max_iter}"
" iterations.",
ConvergenceWarning,
)
if self.use_constraints and self.constraint_order.lower() == "target":
6 changes: 2 additions & 4 deletions pysindy/optimizers/sr3.py
@@ -342,8 +342,8 @@ def _reduce(self, x, y):
coef_sparse = self.coef_.T
n_samples, n_features = x.shape

coef_full = coef_sparse.copy()
if self.use_trimming:
coef_full = coef_sparse.copy()
trimming_array = np.repeat(1.0 - self.trimming_fraction, n_samples)
self.history_trimming_ = [trimming_array]
else:
@@ -392,9 +392,7 @@ def _reduce(self, x, y):
break
else:
warnings.warn(
"SR3._reduce did not converge after {} iterations.".format(
self.max_iter
),
f"SR3 did not converge after {self.max_iter} iterations.",
ConvergenceWarning,
)
self.coef_ = coef_sparse.T
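
The convergence warnings in these files rely on Python's for/else, where the else branch runs only when the loop completes without hitting break. A minimal standalone sketch (step() is hypothetical):

import warnings

def step(k):
    """Hypothetical update; returns True once converged."""
    return False  # pretend we never converge

max_iter = 5
for k in range(max_iter):
    if step(k):
        break
else:
    # Reached only if no break occurred, i.e. no iteration converged
    warnings.warn(f"did not converge after {max_iter} iterations")
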
4 changes: 1 addition & 3 deletions pysindy/optimizers/stable_linear_sr3.py
@@ -417,9 +417,7 @@ def _reduce(self, x, y):
break
else:
warnings.warn(
"StableLinearSR3._reduce did not converge after {} iterations.".format(
self.max_iter
),
f"StableLinearSR3 did not converge after {self.max_iter} iterations.",
ConvergenceWarning,
)

5 changes: 2 additions & 3 deletions pysindy/optimizers/stlsq.py
@@ -203,6 +203,7 @@ def _reduce(self, x, y):
n_targets = y.shape[1]
n_features_selected = np.sum(ind)
sparse_sub = [np.array(self.sparse_ind)] * y.shape[1]
optvar = np.zeros((n_targets, n_features))

# Print initial values for each term in the optimization
if self.verbose:
@@ -266,9 +267,7 @@ def _reduce(self, x, y):
break
else:
warnings.warn(
"STLSQ._reduce did not converge after {} iterations.".format(
self.max_iter
),
"STLSQ did not converge after {self.max_iter} iterations.",
ConvergenceWarning,
)
if self.sparse_ind is None:
