Merge pull request #219 from NREL/reporting_measures
Reporting Measure Arguments
nmerket committed Apr 27, 2021
2 parents 0d5feb9 + 645c5f5 commit 07fc9be
Showing 18 changed files with 406 additions and 129 deletions.
2 changes: 1 addition & 1 deletion buildstockbatch/aws/aws.py
@@ -2032,7 +2032,7 @@ def run_job(cls, job_id, bucket, prefix, job_name, region):

fs = S3FileSystem()
local_fs = LocalFileSystem()
- reporting_measures = cfg.get('reporting_measures', [])
+ reporting_measures = cls.get_reporting_measures(cfg)
dpouts = []
simulation_output_tar_filename = sim_dir.parent / 'simulation_outputs.tar.gz'
with tarfile.open(str(simulation_output_tar_filename), 'w:gz') as simout_tar:
8 changes: 7 additions & 1 deletion buildstockbatch/base.py
@@ -125,6 +125,12 @@ def skip_baseline_sims(self):
baseline_skip = self.cfg['baseline'].get('skip_sims', False)
return baseline_skip

+ @classmethod
+ def get_reporting_measures(cls, cfg):
+ WorkflowGenerator = cls.get_workflow_generator_class(cfg['workflow_generator']['type'])
+ wg = WorkflowGenerator(cfg, 1) # Number of datapoints doesn't really matter here
+ return wg.reporting_measures()

def run_batch(self):
raise NotImplementedError

@@ -551,7 +557,7 @@ def process_results(self, skip_combine=False, force_upload=False):
if 'athena' in aws_conf:
postprocessing.create_athena_tables(aws_conf, os.path.basename(self.output_dir), s3_bucket, s3_prefix)

- if not self.cfg['eagle'].get('postprocessing', {}).get('keep_intermediate_files', False):
+ if not self.cfg.get('eagle', {}).get('postprocessing', {}).get('keep_intermediate_files', False):
logger.info("Removing intermediate files.")
postprocessing.remove_intermediate_files(fs, self.results_dir)
else:
2 changes: 1 addition & 1 deletion buildstockbatch/eagle.py
@@ -378,7 +378,7 @@ def run_building(cls, output_dir, cfg, n_datapoints, i, upgrade_idx=None):
i
)

- reporting_measures = cfg.get('reporting_measures', [])
+ reporting_measures = cls.get_reporting_measures(cfg)
dpout = postprocessing.read_simulation_outputs(fs, reporting_measures, sim_dir, upgrade_id, i)
return dpout

2 changes: 1 addition & 1 deletion buildstockbatch/localdocker.py
@@ -150,7 +150,7 @@ def run_building(cls, project_dir, buildstock_dir, weather_dir, docker_image, re
)

# Read data_point_out.json
- reporting_measures = cfg.get('reporting_measures', [])
+ reporting_measures = cls.get_reporting_measures(cfg)
dpout = postprocessing.read_simulation_outputs(fs, reporting_measures, sim_dir, upgrade_id, i)
return dpout

10 changes: 2 additions & 8 deletions buildstockbatch/postprocessing.py
@@ -189,17 +189,11 @@ def clean_up_results_df(df, cfg, keep_upgrade_id=False):
simulation_output_cols = sorted([col for col in results_df.columns if col.startswith('simulation_output_report')])
sorted_cols = first_few_cols + build_existing_model_cols + simulation_output_cols

- for reporting_measure in cfg.get('reporting_measures', []):
- reporting_measure_cols = sorted([col for col in results_df.columns if
- col.startswith(to_camelcase(reporting_measure))])
- sorted_cols += reporting_measure_cols
remaining_cols = sorted(set(results_df.columns.values).difference(sorted_cols))
sorted_cols += remaining_cols

results_df = results_df.reindex(columns=sorted_cols, copy=False)

- # for col in results_df.columns:
- # if col.startswith('simulation_output_report.') and not col == 'simulation_output_report.applicable':
- # results_df[col] = pd.to_numeric(results_df[col], errors='coerce')

return results_df


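With the per-measure loop gone, clean_up_results_df no longer pulls reporting measure columns to the front by name; they simply fall into the alphabetically sorted remainder. A minimal sketch of the resulting ordering, using made-up column names (not taken from this diff):

import pandas as pd

# Toy stand-in for results_df; column names are illustrative.
results_df = pd.DataFrame(columns=[
    'reporting_measure_2.output_a',
    'simulation_output_report.total_site_energy_mbtu',
    'building_id',
])
first_few_cols = ['building_id']
simulation_output_cols = sorted(col for col in results_df.columns
                                if col.startswith('simulation_output_report'))
sorted_cols = first_few_cols + simulation_output_cols
# Reporting measure columns now land here, alphabetized with everything else.
sorted_cols += sorted(set(results_df.columns.values).difference(sorted_cols))
results_df = results_df.reindex(columns=sorted_cols, copy=False)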
1 change: 1 addition & 0 deletions buildstockbatch/test/test_eagle.py
@@ -226,6 +226,7 @@ def make_sim_dir_mock(building_id, upgrade_idx, base_dir, overwrite_existing=Fal
sampler_prop_mock = mocker.patch.object(EagleBatch, 'sampler', new_callable=mocker.PropertyMock)
sampler_mock = mocker.MagicMock()
sampler_prop_mock.return_value = sampler_mock
+ sampler_mock.csv_path = results_dir.parent / 'housing_characteristic2' / 'buildstock.csv'
sampler_mock.run_sampling = mocker.MagicMock(return_value='buildstock.csv')

b = EagleBatch(project_filename)
@@ -27,7 +27,7 @@ workflow_generator:
simulation_output:
include_enduse_subcategory: true
reporting_measures:
- - ReportingMeasure2
+ - measure_dir_name: ReportingMeasure2

upgrades:
- upgrade_name: good upgrade
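Swapping bare measure names for mappings is the point of this PR: each reporting_measures entry now names its measure directory explicitly and, presumably, can also carry arguments for that measure. A hedged sketch of an entry under the new schema (the arguments key and its contents are illustrative, not taken from this diff):

reporting_measures:
  - measure_dir_name: ReportingMeasure2
    arguments:
      output_format: csv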
@@ -21,7 +21,7 @@ workflow_generator:
output_variables:
- Zone Mean Air Temperature
reporting_measures:
- - ReportingMeasure1
+ - measure_dir_name: ReportingMeasure1

upgrades:
- upgrade_name: cool upgrade
7 changes: 7 additions & 0 deletions buildstockbatch/workflow_generator/base.py
@@ -66,3 +66,10 @@ def validate(cls, cfg):
:type cfg: dict
"""
return True

+ def reporting_measures(self):
+ """Return a list of reporting measures to include in the outputs
+ Replace this in your subclass
+ """
+ return []
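For context, a hedged sketch of how a concrete workflow generator might override this hook, assuming it keeps its config on self.cfg and lists reporting measures under workflow_generator -> args -> reporting_measures as mappings with a measure_dir_name key (the config path, key names, and class name are assumptions, not taken from this diff):

class ExampleWorkflowGenerator:
    def __init__(self, cfg, n_datapoints):
        self.cfg = cfg
        self.n_datapoints = n_datapoints

    def reporting_measures(self):
        # Return just the measure directory names; any arguments stay with the generator.
        workflow_args = self.cfg['workflow_generator'].get('args', {})
        return [rm['measure_dir_name'] for rm in workflow_args.get('reporting_measures', [])]

Under that assumed layout, ExampleWorkflowGenerator(cfg, 1).reporting_measures() would return the measure directory names, e.g. ['ReportingMeasure2'].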
