Hotfix v1.5.1 #238

Merged · 8 commits · Aug 18, 2023
8 changes: 4 additions & 4 deletions examples/horizon.py
@@ -20,7 +20,7 @@ def run_example():
# Step 1: Setup model & future loading
def future_loading(t, x = None):
return {}
- m = ThrownObject(process_noise = 0.25, measurement_noise = 0.2)
+ m = ThrownObject(process_noise = 0.2, measurement_noise = 0.1)
initial_state = m.initialize()

# Step 2: Demonstrating state estimator
@@ -50,17 +50,17 @@ def future_loading(t, x = None):
# THIS IS WHERE WE DIVERGE FROM THE THROWN_OBJECT_EXAMPLE
# Here we set a prediction horizon
# We're saying we are not interested in any events that occur after this time
- PREDICTION_HORIZON = 7.75
+ PREDICTION_HORIZON = 7.67
samples = filt.x # Since we're using a particle filter, which is also sample-based, we can directly use the samples, without changes
- STEP_SIZE = 0.01
+ STEP_SIZE = 0.001
mc_results = mc.predict(samples, future_loading, dt=STEP_SIZE, horizon = PREDICTION_HORIZON)
print("\nPredicted Time of Event:")
metrics = mc_results.time_of_event.metrics()
pprint(metrics) # Note this takes some time
mc_results.time_of_event.plot_hist(keys = 'impact')
mc_results.time_of_event.plot_hist(keys = 'falling')

print("\nSamples where impact occurs before horizon: {:.2f}%".format(metrics['impact']['number of samples']/NUM_SAMPLES*100))
print("\nSamples where impact occurs before horizon: {:.2f}%".format(metrics['impact']['number of samples']/mc.parameters['n_samples']*100))

# Step 4: Show all plots
import matplotlib.pyplot as plt # For plotting
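For readers skimming the diff, the workflow the example exercises is compact enough to restate. Below is a condensed sketch of the updated usage (parameter values are illustrative, and the example's particle-filter step is skipped by predicting directly from the initial state, as the new tests/test_horizon.py added later in this PR does):

from prog_models.models.thrown_object import ThrownObject
from prog_algs import predictors

def future_loading(t, x=None):
    return {}  # ThrownObject needs no external load

m = ThrownObject(process_noise=0.2, measurement_noise=0.1)
initial_state = m.initialize()

mc = predictors.MonteCarlo(m)
mc.parameters['n_samples'] = 50  # small sample count keeps this quick

PREDICTION_HORIZON = 7.67  # ignore any events that occur after this time
STEP_SIZE = 0.001
mc_results = mc.predict(initial_state, future_loading,
                        dt=STEP_SIZE, horizon=PREDICTION_HORIZON)

# Count samples that reached 'impact' before the horizon, mirroring the
# example's updated print statement, which now uses mc.parameters['n_samples']
n = mc.parameters['n_samples']
n_impact = sum(mc_results.time_of_event[i]['impact'] is not None for i in range(n))
print("Samples where impact occurs before horizon: {:.2f}%".format(n_impact / n * 100))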
2 changes: 1 addition & 1 deletion setup.py
@@ -10,7 +10,7 @@

setup(
name = 'prog_algs',
- version = '1.5.0',
+ version = '1.5.1',
description = "The NASA Prognostics Algorithm Package is a framework for model-based prognostics (computation of remaining useful life) of engineering systems. It includes algorithms for state estimation and prediction, including uncertainty propagation. The algorithms use prognostic models (see prog_models) to perform estimation and prediction. The package enables rapid development of prognostics solutions for given models of components and systems. Algorithms can be swapped for comparative studies and evaluations",
long_description=long_description,
long_description_content_type='text/markdown',
2 changes: 1 addition & 1 deletion src/prog_algs/__init__.py
@@ -5,7 +5,7 @@

import warnings

- __version__ = '1.5.0'
+ __version__ = '1.5.1'

def run_prog_playback(obs, pred, future_loading, output_measurements, **kwargs):
warnings.warn("Depreciated in 1.2.0, will be removed in a future release.", DeprecationWarning)
8 changes: 8 additions & 0 deletions src/prog_algs/predictors/monte_carlo.py
@@ -74,6 +74,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)

# Perform prediction
t0 = params.get('t0', 0)
+ HORIZON = params.get('horizon', float('inf')) # Save the horizon to be used later
for x in state:
first_output = self.model.output(x)

@@ -82,6 +83,7 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)

params['t0'] = t0
params['x'] = x
+ params['horizon'] = HORIZON # reset to initial horizon

if 'save_freq' in params and not isinstance(params['save_freq'], tuple):
params['save_freq'] = (params['t0'], params['save_freq'])
@@ -93,6 +95,8 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)
**params
)
else:

+ # Simulate
events_remaining = params['events'].copy()

times = []
@@ -103,6 +107,10 @@ def predict(self, state: UncertainData, future_loading_eqn: Callable, **kwargs)

# Non-vectorized prediction
while len(events_remaining) > 0: # Still events to predict
+ # Since the horizon is relative to t0 (the prediction starting point),
+ # we must subtract the time already elapsed (current t0 minus the prediction t0)
+ # from the horizon for each subsequent simulation
+ params['horizon'] = HORIZON - (params['t0'] - t0)
(t, u, xi, z, es) = simulate_to_threshold(future_loading_eqn,
first_output,
threshold_keys = events_remaining,
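The comment block added above carries the core reasoning of this fix, so a standalone toy sketch may help (simulate_until is a made-up stand-in, not the package's simulate_to_threshold API): when events are predicted one at a time, each per-event simulation starts where the previous one stopped, so the horizon handed to it must be the remaining time, HORIZON - (current t0 - prediction t0). Without that correction every later simulation would be granted the full horizon again.

def simulate_until(t_start, horizon, event_duration=2.0):
    # Hypothetical stand-in for a per-event simulation: the next event would
    # occur event_duration after t_start, but simulation stops at the horizon.
    t_event = t_start + event_duration
    t_stop = t_start + horizon
    return (t_event, True) if t_event <= t_stop else (t_stop, False)

HORIZON = 5.0          # total prediction horizon, relative to the prediction t0
prediction_t0 = 0.0
t0 = prediction_t0
for event in ['falling', 'impact', 'third_event']:  # illustrative event names
    remaining = HORIZON - (t0 - prediction_t0)      # the correction added in the diff
    t0, reached = simulate_until(t0, remaining)
    if reached:
        print(f"{event} reached at t = {t0:.1f}")
    else:
        print(f"{event} not reached before the horizon")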
6 changes: 3 additions & 3 deletions src/prog_algs/state_estimators/particle_filter.py
@@ -67,9 +67,9 @@ def __init__(self, model, x0, **kwargs):
# Added to avoid float/int issues
self.parameters['num_particles'] = int(self.parameters['num_particles'])
sample_gen = x0.sample(self.parameters['num_particles'])
- samples = [array(sample_gen.key(k), dtype=float64) for k in x0.keys()]
+ samples = {k: array(sample_gen.key(k), dtype=float64) for k in x0.keys()}

- self.particles = model.StateContainer(array(samples, dtype=float64))
+ self.particles = model.StateContainer(samples)

if 'R' in self.parameters:
# For backwards compatibility
@@ -81,7 +81,7 @@ def __init__(self, model, x0, **kwargs):
def __str__(self):
return "{} State Estimator".format(self.__class__)

- def estimate(self, t : float, u, z, dt = None):
+ def estimate(self, t: float, u, z, dt = None):
"""
Perform one state estimation step (i.e., update the state estimate, filt.x)

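The change from a list of arrays to a dict keyed by state name is what fixes the key-ordering bug exercised by the new test in tests/test_state_estimators.py at the end of this PR. A toy illustration of the difference (made-up state names and values, plain numpy, not the prog_algs StateContainer):

from numpy import array, float64

model_states = ['x', 'v']        # the order the model expects
x0 = {'v': 40.0, 'x': 1.83}      # user-supplied dict in a different order

# Positional construction (the old code path): rows follow x0's key order,
# so row 0 holds the 'v' samples but would be read back as the first model
# state, 'x'.
positional = array([[x0[k]] * 2 for k in x0.keys()], dtype=float64)
print(dict(zip(model_states, positional)))  # 'x' wrongly gets the 40.0 samples

# Keyed construction (the new code path): each state is looked up by name,
# so the order of x0's keys no longer matters.
keyed = {k: array([x0[k]] * 2, dtype=float64) for k in x0.keys()}
print(keyed['x'], keyed['v'])               # [1.83 1.83] [40. 40.]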
6 changes: 6 additions & 0 deletions tests/__main__.py
@@ -7,6 +7,7 @@
from .test_metrics import run_tests as metrics_main
from .test_visualize import run_tests as visualize_main
from .test_tutorials import run_tests as tutorials_main
+ from .test_horizon import run_tests as horizon_main

import unittest
import sys
@@ -69,5 +70,10 @@
except Exception:
was_successful = False

+ try:
+ horizon_main()
+ except Exception:
+ was_successful = False

if not was_successful:
raise Exception('Tests Failed')

75 changes: 75 additions & 0 deletions tests/test_horizon.py
@@ -0,0 +1,75 @@
from io import StringIO
import sys
import unittest

from prog_models.models.thrown_object import ThrownObject
from prog_algs import *

class TestHorizon(unittest.TestCase):
def setUp(self):
# set stdout (so it won't print)
sys.stdout = StringIO()

def tearDown(self):
sys.stdout = sys.__stdout__

def test_horizon_ex(self):
# Setup model
m = ThrownObject(process_noise = 0.25, measurement_noise = 0.2)
# Change parameters (to make simulation faster)
m.parameters['thrower_height'] = 1.0
m.parameters['throwing_speed'] = 10.0
initial_state = m.initialize()

# Define future loading (necessary for prediction call)
def future_loading(t, x = None):
return {}

# Setup Predictor (smaller sample size for efficiency)
mc = predictors.MonteCarlo(m)
mc.parameters['n_samples'] = 50

# Perform a prediction
# With this horizon, all samples will reach 'falling', but only some will reach 'impact'
PREDICTION_HORIZON = 2.127
STEP_SIZE = 0.001
mc_results = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon = PREDICTION_HORIZON)

# 'falling' happens before the horizon is met
falling_res = [mc_results.time_of_event[iter]['falling'] for iter in range(mc.parameters['n_samples']) if mc_results.time_of_event[iter]['falling'] is not None]
self.assertEqual(len(falling_res), mc.parameters['n_samples'])

# 'impact' happens around the horizon, so some samples have reached this event and others haven't
impact_res = [mc_results.time_of_event[iter]['impact'] for iter in range(mc.parameters['n_samples']) if mc_results.time_of_event[iter]['impact'] is not None]
self.assertLess(len(impact_res), mc.parameters['n_samples'])

# Try again with very low prediction_horizon, where no events are reached
# Note: here we count how many None values there are for each event (in the above and below examples, we count values that are NOT None)
mc_results_no_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE, horizon = 0.3)
falling_res_no_event = [mc_results_no_event.time_of_event[iter]['falling'] for iter in range(mc.parameters['n_samples']) if mc_results_no_event.time_of_event[iter]['falling'] is None]
impact_res_no_event = [mc_results_no_event.time_of_event[iter]['impact'] for iter in range(mc.parameters['n_samples']) if mc_results_no_event.time_of_event[iter]['impact'] is None]
self.assertEqual(len(falling_res_no_event), mc.parameters['n_samples'])
self.assertEqual(len(impact_res_no_event), mc.parameters['n_samples'])

# Finally, try without horizon, all events should be reached for all samples
mc_results_all_event = mc.predict(initial_state, future_loading, dt=STEP_SIZE)
falling_res_all_event = [mc_results_all_event.time_of_event[iter]['falling'] for iter in range(mc.parameters['n_samples']) if mc_results_all_event.time_of_event[iter]['falling'] is not None]
impact_res_all_event = [mc_results_all_event.time_of_event[iter]['impact'] for iter in range(mc.parameters['n_samples']) if mc_results_all_event.time_of_event[iter]['impact'] is not None]
self.assertEqual(len(falling_res_all_event), mc.parameters['n_samples'])
self.assertEqual(len(impact_res_all_event), mc.parameters['n_samples'])

# This allows the module to be executed directly
def run_tests():
unittest.main()

def main():
load_test = unittest.TestLoader()
runner = unittest.TextTestRunner()
print("\n\nTesting Horizon functionality")
result = runner.run(load_test.loadTestsFromTestCase(TestHorizon)).wasSuccessful()

if not result:
raise Exception("Failed test")

if __name__ == '__main__':
main()
17 changes: 16 additions & 1 deletion tests/test_state_estimators.py
@@ -4,7 +4,7 @@
import random

from prog_models import PrognosticsModel, LinearModel
- from prog_models.models import ThrownObject, BatteryElectroChem, PneumaticValveBase
+ from prog_models.models import ThrownObject, BatteryElectroChem, PneumaticValveBase, BatteryElectroChemEOD
from prog_algs.state_estimators import ParticleFilter, KalmanFilter, UnscentedKalmanFilter
from prog_algs.uncertain_data import ScalarData, MultivariateNormalDist, UnweightedSamples

@@ -557,6 +557,21 @@ def future_loading(t, x=None):
times = simulation_result.times
for t, u, z in zip(times, inputs.data, outputs.data):
kf.estimate(t, u, z)

+ def test_PF_particle_ordering(self):
+ """
+ This tests for a bug found by @mstraut where the particle filter mixed up the keys if users:
+ 1. do not call m.initialize(), and instead
+ 2. provide the state as a dictionary rather than a state container, and
+ 3. list the states in a different order than m.states
+ """
+ m = BatteryElectroChemEOD()
+ x0 = m.parameters['x0'] # state as a dictionary, keys in a different order than m.states
+ filt = ParticleFilter(m, x0, num_particles=2)
+ for key in m.states:
+ self.assertEqual(filt.particles[key][0], x0[key])
+ self.assertEqual(filt.particles[key][1], x0[key])


# This allows the module to be executed directly
def run_tests():