From b3d5fee5567a5d4532d3445d9063740d0ea4ae97 Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Mon, 7 Oct 2019 21:10:03 +0200 Subject: [PATCH 01/13] Auto-PyTorch 0.0.2 Release - Adding AutoNet classes AutoNetImageClassification, AutoNetImageClassificationMultipleDatasets for image classification - Adding new optimizers - Adding new learning rate schedulers - Adding new networks --- .gitignore | 17 +- README.md | 2 +- autoPyTorch.egg-info/PKG-INFO | 17 + autoPyTorch.egg-info/SOURCES.txt | 237 ++++++ autoPyTorch.egg-info/dependency_links.txt | 1 + autoPyTorch.egg-info/requires.txt | 16 + autoPyTorch.egg-info/top_level.txt | 3 + autoPyTorch/__init__.py | 2 +- .../components/lr_scheduler/lr_schedulers.py | 194 ++++- autoPyTorch/components/metrics/__init__.py | 3 +- .../components/metrics/standard_metrics.py | 31 +- autoPyTorch/components/networks/base_net.py | 16 +- .../components/networks/image/__init__.py | 4 + .../components/networks/image/convnet.py | 87 ++ .../networks/image/darts/__init__.py | 0 .../networks/image/darts/darts_worker.py | 114 +++ .../networks/image/darts/genotypes.py | 98 +++ .../components/networks/image/darts/model.py | 238 ++++++ .../networks/image/darts/operations.py | 105 +++ .../components/networks/image/darts/utils.py | 166 ++++ .../components/networks/image/densenet.py | 171 ++++ .../networks/image/densenet_flexible.py | 245 ++++++ .../components/networks/image/mobilenet.py | 258 ++++++ .../components/networks/image/resnet.py | 292 +++++++ .../components/networks/image/resnet152.py | 190 +++++ .../networks/image/utils/__init__.py | 0 .../networks/image/utils/conv2d_helpers.py | 135 ++++ .../networks/image/utils/mobilenet_utils.py | 753 ++++++++++++++++++ .../networks/image/utils/shakedrop.py | 60 ++ .../networks/image/utils/shakeshakeblock.py | 49 ++ .../components/networks/image/utils/utils.py | 44 + autoPyTorch/components/optimizer/optimizer.py | 48 +- .../image_preprocessing/__init__.py | 0 .../image_preprocessing/archive.py | 28 + .../augmentation_transforms.py | 439 ++++++++++ .../image_preprocessing/operations.py | 283 +++++++ .../image_preprocessing/transforms.py | 177 ++++ .../preprocessing/loss_weight_strategies.py | 16 +- .../components/training/image/__init__.py | 0 .../training/image/base_training.py | 172 ++++ .../components/training/image/budget_types.py | 50 ++ .../training/image/checkpoints/__init__.py | 0 .../image/checkpoints/load_specific.py | 58 ++ .../training/image/checkpoints/save_load.py | 39 + .../training/image/early_stopping.py | 84 ++ .../training/image/lr_scheduling.py | 39 + .../components/training/image/mixup.py | 32 + .../components/training/image/trainer.py | 251 ++++++ autoPyTorch/components/training/trainer.py | 1 - autoPyTorch/core/api.py | 118 ++- autoPyTorch/core/autonet_classes/__init__.py | 4 +- .../autonet_classes/autonet_feature_data.py | 20 +- .../autonet_image_classification.py | 34 + ..._image_classification_multiple_datasets.py | 56 ++ .../autonet_classes/autonet_image_data.py | 133 ++++ .../bohb_multi_kde_ext.py | 20 + .../hpbandster_extensions/run_with_time.py | 47 +- .../presets/image_classification/__init__.py | 0 .../presets/image_classification/full_cs.txt | 0 .../image_classification/medium_cs.txt | 4 + .../presets/image_classification/tiny_cs.txt | 4 + .../__init__.py | 0 .../full_cs.txt | 0 .../medium_cs.txt | 4 + .../tiny_cs.txt | 4 + autoPyTorch/core/worker_no_timelimit.py | 130 +++ autoPyTorch/data_management/data_loader.py | 47 ++ autoPyTorch/data_management/data_manager.py | 98 +++ 
autoPyTorch/data_management/data_reader.py | 35 +- autoPyTorch/data_management/image_loader.py | 119 +++ autoPyTorch/pipeline/base/node.py | 7 +- autoPyTorch/pipeline/base/pipeline.py | 3 +- .../pipeline/nodes/create_dataloader.py | 2 +- autoPyTorch/pipeline/nodes/image/__init__.py | 0 .../image/autonet_settings_no_shuffle.py | 71 ++ .../nodes/image/create_dataset_info.py | 154 ++++ .../nodes/image/create_image_dataloader.py | 97 +++ .../nodes/image/cross_validation_indices.py | 223 ++++++ .../nodes/image/image_augmentation.py | 221 +++++ .../nodes/image/image_dataset_reader.py | 57 ++ .../image/loss_module_selector_indices.py | 35 + .../pipeline/nodes/image/multiple_datasets.py | 115 +++ .../image/network_selector_datasetinfo.py | 35 + .../optimization_algorithm_no_timelimit.py | 365 +++++++++ .../nodes/image/simple_scheduler_selector.py | 26 + .../pipeline/nodes/image/simple_train_node.py | 348 ++++++++ .../pipeline/nodes/image/single_dataset.py | 35 + .../pipeline/nodes/loss_module_selector.py | 2 +- autoPyTorch/pipeline/nodes/metric_selector.py | 15 + autoPyTorch/pipeline/nodes/train_node.py | 10 +- autoPyTorch/utils/benchmarking/benchmark.py | 12 +- .../benchmark_pipeline/__init__.py | 3 +- .../benchmark_pipeline/apply_user_updates.py | 71 ++ .../benchmark_pipeline/create_autonet.py | 6 +- .../benchmark_pipeline/fit_autonet.py | 132 ++- .../benchmark_pipeline/for_autonet_config.py | 7 +- .../benchmark_pipeline/for_instance.py | 35 +- .../benchmark_pipeline/for_run.py | 7 +- .../prepare_result_folder.py | 84 +- .../benchmark_pipeline/read_instance_data.py | 41 +- .../benchmark_pipeline/save_results.py | 13 +- .../benchmark_pipeline/set_autonet_config.py | 3 +- .../utils/config/config_file_parser.py | 6 +- .../utils/config_space_hyperparameter.py | 7 +- autoPyTorch/utils/configspace_wrapper.py | 6 + .../hyperparameter_search_space_update.py | 1 + autoPyTorch/utils/loggers.py | 219 +++++ autoPyTorch/utils/modify_config_space.py | 242 ++++++ autoPyTorch/utils/thread_read_write.py | 42 + configs/autonet/automl/cifar_example.txt | 15 + configs/benchmark/cifar_example.txt | 7 + configs/datasets/cifar.txt | 1 + configs/datasets/openml_image.txt | 1 + configs/refit/refit_example.json | 19 + datasets/CIFAR10.csv | 1 + datasets/example.csv | 99 +++ .../example_images/alley_cat_s_000843.png | Bin 0 -> 2396 bytes .../example_images/american_toad_s_001003.png | Bin 0 -> 2705 bytes datasets/example_images/arabian_s_000782.png | Bin 0 -> 2389 bytes datasets/example_images/arabian_s_002303.png | Bin 0 -> 2146 bytes .../articulated_lorry_s_000916.png | Bin 0 -> 2357 bytes datasets/example_images/auto_s_000800.png | Bin 0 -> 2060 bytes .../example_images/automobile_s_001645.png | Bin 0 -> 2512 bytes .../bird_of_passage_s_000006.png | Bin 0 -> 1829 bytes .../example_images/broodmare_s_000179.png | Bin 0 -> 2431 bytes .../example_images/broodmare_s_000313.png | Bin 0 -> 2470 bytes datasets/example_images/buckskin_s_000031.png | Bin 0 -> 2248 bytes .../example_images/bufo_bufo_s_002202.png | Bin 0 -> 2472 bytes datasets/example_images/bullfrog_s_000797.png | Bin 0 -> 2224 bytes datasets/example_images/bullfrog_s_001028.png | Bin 0 -> 2664 bytes datasets/example_images/camion_s_000599.png | Bin 0 -> 2372 bytes .../canis_familiaris_s_000450.png | Bin 0 -> 2082 bytes .../capreolus_capreolus_s_001283.png | Bin 0 -> 1972 bytes .../capreolus_capreolus_s_001380.png | Bin 0 -> 2112 bytes .../capreolus_capreolus_s_001605.png | Bin 0 -> 2174 bytes datasets/example_images/car_s_000040.png | Bin 0 -> 2349 bytes 
.../example_images/cassowary_s_000194.png | Bin 0 -> 2412 bytes .../example_images/cassowary_s_002024.png | Bin 0 -> 2347 bytes .../cervus_elaphus_s_000903.png | Bin 0 -> 2284 bytes .../cervus_elaphus_s_001124.png | Bin 0 -> 2341 bytes .../example_images/convertible_s_000295.png | Bin 0 -> 2344 bytes .../example_images/convertible_s_000520.png | Bin 0 -> 2563 bytes datasets/example_images/cruiser_s_000163.png | Bin 0 -> 2473 bytes .../example_images/dawn_horse_s_001453.png | Bin 0 -> 2127 bytes .../delivery_truck_s_001300.png | Bin 0 -> 2410 bytes .../delivery_truck_s_001587.png | Bin 0 -> 2397 bytes .../example_images/domestic_cat_s_000913.png | Bin 0 -> 2481 bytes .../example_images/domestic_dog_s_000455.png | Bin 0 -> 2510 bytes datasets/example_images/dredger_s_000486.png | Bin 0 -> 1786 bytes .../example_images/dump_truck_s_000163.png | Bin 0 -> 2362 bytes .../example_images/dump_truck_s_001097.png | Bin 0 -> 2411 bytes .../example_images/dump_truck_s_001363.png | Bin 0 -> 2398 bytes datasets/example_images/dumper_s_000805.png | Bin 0 -> 2308 bytes datasets/example_images/elk_s_001751.png | Bin 0 -> 2086 bytes .../example_images/estate_car_s_001092.png | Bin 0 -> 2451 bytes .../example_images/fallow_deer_s_000351.png | Bin 0 -> 2269 bytes .../example_images/fallow_deer_s_001133.png | Bin 0 -> 2197 bytes .../example_images/fallow_deer_s_001785.png | Bin 0 -> 2182 bytes datasets/example_images/fawn_s_001418.png | Bin 0 -> 2256 bytes .../fighter_aircraft_s_000720.png | Bin 0 -> 1952 bytes .../fighter_aircraft_s_001009.png | Bin 0 -> 1864 bytes .../example_images/garbage_truck_s_001211.png | Bin 0 -> 2280 bytes .../example_images/green_frog_s_001384.png | Bin 0 -> 2443 bytes .../example_images/house_cat_s_000064.png | Bin 0 -> 2185 bytes .../example_images/house_cat_s_002004.png | Bin 0 -> 2396 bytes .../example_images/icebreaker_s_001689.png | Bin 0 -> 2029 bytes .../example_images/ladder_truck_s_001799.png | Bin 0 -> 2748 bytes datasets/example_images/lapdog_s_001489.png | Bin 0 -> 2276 bytes .../example_images/liberty_ship_s_001456.png | Bin 0 -> 2190 bytes datasets/example_images/lipizzan_s_001223.png | Bin 0 -> 2360 bytes datasets/example_images/lorry_s_000562.png | Bin 0 -> 2095 bytes .../example_images/male_horse_s_000742.png | Bin 0 -> 2458 bytes datasets/example_images/maltese_s_000562.png | Bin 0 -> 2569 bytes .../example_images/monoplane_s_000877.png | Bin 0 -> 2348 bytes datasets/example_images/mouser_s_000792.png | Bin 0 -> 2428 bytes .../example_images/mule_deer_s_000357.png | Bin 0 -> 2572 bytes datasets/example_images/mutt_s_000997.png | Bin 0 -> 2503 bytes datasets/example_images/ostrich_s_000026.png | Bin 0 -> 2432 bytes datasets/example_images/ostrich_s_000147.png | Bin 0 -> 1994 bytes datasets/example_images/ostrich_s_001561.png | Bin 0 -> 2405 bytes datasets/example_images/peke_s_000545.png | Bin 0 -> 2498 bytes datasets/example_images/pekinese_s_000046.png | Bin 0 -> 2388 bytes .../example_images/police_boat_s_001118.png | Bin 0 -> 2377 bytes .../police_cruiser_s_001385.png | Bin 0 -> 2505 bytes datasets/example_images/puppy_s_001045.png | Bin 0 -> 2282 bytes .../example_images/rana_pipiens_s_000379.png | Bin 0 -> 2579 bytes datasets/example_images/red_deer_s_001101.png | Bin 0 -> 2127 bytes datasets/example_images/red_deer_s_001719.png | Bin 0 -> 2396 bytes .../rhea_americana_s_000436.png | Bin 0 -> 2133 bytes datasets/example_images/sika_s_000337.png | Bin 0 -> 2479 bytes .../example_images/spring_frog_s_000407.png | Bin 0 -> 2199 bytes 
datasets/example_images/stallion_s_000015.png | Bin 0 -> 2595 bytes .../example_images/station_wagon_s_000464.png | Bin 0 -> 2400 bytes .../example_images/station_wagon_s_002537.png | Bin 0 -> 2461 bytes .../example_images/supertanker_s_000275.png | Bin 0 -> 1556 bytes .../example_images/supertanker_s_000761.png | Bin 0 -> 1980 bytes .../example_images/tabby_cat_s_000069.png | Bin 0 -> 2365 bytes .../example_images/tabby_cat_s_001983.png | Bin 0 -> 2167 bytes datasets/example_images/tabby_s_001593.png | Bin 0 -> 2122 bytes datasets/example_images/tabby_s_001774.png | Bin 0 -> 2294 bytes .../example_images/tailed_frog_s_000246.png | Bin 0 -> 2596 bytes .../example_images/toad_frog_s_001786.png | Bin 0 -> 2364 bytes .../tractor_trailer_s_000653.png | Bin 0 -> 2630 bytes .../example_images/trailer_truck_s_001350.png | Bin 0 -> 2341 bytes datasets/example_images/truck_s_000028.png | Bin 0 -> 2414 bytes .../example_images/trucking_rig_s_001247.png | Bin 0 -> 2333 bytes .../example_images/trucking_rig_s_001431.png | Bin 0 -> 2350 bytes datasets/example_images/true_cat_s_000886.png | Bin 0 -> 1887 bytes datasets/example_images/wagon_s_000378.png | Bin 0 -> 2442 bytes datasets/example_images/wagon_s_000572.png | Bin 0 -> 2461 bytes datasets/example_images/wagon_s_002463.png | Bin 0 -> 2387 bytes datasets/example_images/wagtail_s_000747.png | Bin 0 -> 2424 bytes .../example_images/walking_horse_s_000071.png | Bin 0 -> 2619 bytes .../example_images/western_toad_s_000622.png | Bin 0 -> 2455 bytes datasets/example_images/wrecker_s_002395.png | Bin 0 -> 2467 bytes examples/basics/Auto-PyTorch Tutorial.ipynb | 387 +++++++++ examples/real_data/openml_task.py | 30 + optional-requirements.txt | 2 +- requirements.txt | 6 +- setup.py | 2 +- 220 files changed, 9307 insertions(+), 163 deletions(-) create mode 100644 autoPyTorch.egg-info/PKG-INFO create mode 100644 autoPyTorch.egg-info/SOURCES.txt create mode 100644 autoPyTorch.egg-info/dependency_links.txt create mode 100644 autoPyTorch.egg-info/requires.txt create mode 100644 autoPyTorch.egg-info/top_level.txt create mode 100644 autoPyTorch/components/networks/image/__init__.py create mode 100644 autoPyTorch/components/networks/image/convnet.py create mode 100644 autoPyTorch/components/networks/image/darts/__init__.py create mode 100644 autoPyTorch/components/networks/image/darts/darts_worker.py create mode 100644 autoPyTorch/components/networks/image/darts/genotypes.py create mode 100644 autoPyTorch/components/networks/image/darts/model.py create mode 100644 autoPyTorch/components/networks/image/darts/operations.py create mode 100644 autoPyTorch/components/networks/image/darts/utils.py create mode 100644 autoPyTorch/components/networks/image/densenet.py create mode 100644 autoPyTorch/components/networks/image/densenet_flexible.py create mode 100644 autoPyTorch/components/networks/image/mobilenet.py create mode 100644 autoPyTorch/components/networks/image/resnet.py create mode 100644 autoPyTorch/components/networks/image/resnet152.py create mode 100644 autoPyTorch/components/networks/image/utils/__init__.py create mode 100644 autoPyTorch/components/networks/image/utils/conv2d_helpers.py create mode 100644 autoPyTorch/components/networks/image/utils/mobilenet_utils.py create mode 100644 autoPyTorch/components/networks/image/utils/shakedrop.py create mode 100644 autoPyTorch/components/networks/image/utils/shakeshakeblock.py create mode 100644 autoPyTorch/components/networks/image/utils/utils.py create mode 100644 
autoPyTorch/components/preprocessing/image_preprocessing/__init__.py create mode 100644 autoPyTorch/components/preprocessing/image_preprocessing/archive.py create mode 100644 autoPyTorch/components/preprocessing/image_preprocessing/augmentation_transforms.py create mode 100644 autoPyTorch/components/preprocessing/image_preprocessing/operations.py create mode 100644 autoPyTorch/components/preprocessing/image_preprocessing/transforms.py create mode 100644 autoPyTorch/components/training/image/__init__.py create mode 100644 autoPyTorch/components/training/image/base_training.py create mode 100644 autoPyTorch/components/training/image/budget_types.py create mode 100644 autoPyTorch/components/training/image/checkpoints/__init__.py create mode 100644 autoPyTorch/components/training/image/checkpoints/load_specific.py create mode 100644 autoPyTorch/components/training/image/checkpoints/save_load.py create mode 100644 autoPyTorch/components/training/image/early_stopping.py create mode 100644 autoPyTorch/components/training/image/lr_scheduling.py create mode 100644 autoPyTorch/components/training/image/mixup.py create mode 100644 autoPyTorch/components/training/image/trainer.py create mode 100644 autoPyTorch/core/autonet_classes/autonet_image_classification.py create mode 100644 autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py create mode 100644 autoPyTorch/core/autonet_classes/autonet_image_data.py create mode 100644 autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py create mode 100644 autoPyTorch/core/presets/image_classification/__init__.py create mode 100644 autoPyTorch/core/presets/image_classification/full_cs.txt create mode 100644 autoPyTorch/core/presets/image_classification/medium_cs.txt create mode 100644 autoPyTorch/core/presets/image_classification/tiny_cs.txt create mode 100644 autoPyTorch/core/presets/image_classification_multiple_datasets/__init__.py create mode 100644 autoPyTorch/core/presets/image_classification_multiple_datasets/full_cs.txt create mode 100644 autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt create mode 100644 autoPyTorch/core/presets/image_classification_multiple_datasets/tiny_cs.txt create mode 100644 autoPyTorch/core/worker_no_timelimit.py create mode 100644 autoPyTorch/data_management/data_loader.py create mode 100644 autoPyTorch/data_management/image_loader.py create mode 100644 autoPyTorch/pipeline/nodes/image/__init__.py create mode 100644 autoPyTorch/pipeline/nodes/image/autonet_settings_no_shuffle.py create mode 100644 autoPyTorch/pipeline/nodes/image/create_dataset_info.py create mode 100644 autoPyTorch/pipeline/nodes/image/create_image_dataloader.py create mode 100644 autoPyTorch/pipeline/nodes/image/cross_validation_indices.py create mode 100644 autoPyTorch/pipeline/nodes/image/image_augmentation.py create mode 100644 autoPyTorch/pipeline/nodes/image/image_dataset_reader.py create mode 100644 autoPyTorch/pipeline/nodes/image/loss_module_selector_indices.py create mode 100644 autoPyTorch/pipeline/nodes/image/multiple_datasets.py create mode 100644 autoPyTorch/pipeline/nodes/image/network_selector_datasetinfo.py create mode 100644 autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py create mode 100644 autoPyTorch/pipeline/nodes/image/simple_scheduler_selector.py create mode 100644 autoPyTorch/pipeline/nodes/image/simple_train_node.py create mode 100644 autoPyTorch/pipeline/nodes/image/single_dataset.py create mode 100644 
autoPyTorch/utils/benchmarking/benchmark_pipeline/apply_user_updates.py create mode 100644 autoPyTorch/utils/loggers.py create mode 100644 autoPyTorch/utils/modify_config_space.py create mode 100644 autoPyTorch/utils/thread_read_write.py create mode 100644 configs/autonet/automl/cifar_example.txt create mode 100644 configs/benchmark/cifar_example.txt create mode 100644 configs/datasets/cifar.txt create mode 100644 configs/datasets/openml_image.txt create mode 100644 configs/refit/refit_example.json create mode 100644 datasets/CIFAR10.csv create mode 100644 datasets/example.csv create mode 100644 datasets/example_images/alley_cat_s_000843.png create mode 100644 datasets/example_images/american_toad_s_001003.png create mode 100644 datasets/example_images/arabian_s_000782.png create mode 100644 datasets/example_images/arabian_s_002303.png create mode 100644 datasets/example_images/articulated_lorry_s_000916.png create mode 100644 datasets/example_images/auto_s_000800.png create mode 100644 datasets/example_images/automobile_s_001645.png create mode 100644 datasets/example_images/bird_of_passage_s_000006.png create mode 100644 datasets/example_images/broodmare_s_000179.png create mode 100644 datasets/example_images/broodmare_s_000313.png create mode 100644 datasets/example_images/buckskin_s_000031.png create mode 100644 datasets/example_images/bufo_bufo_s_002202.png create mode 100644 datasets/example_images/bullfrog_s_000797.png create mode 100644 datasets/example_images/bullfrog_s_001028.png create mode 100644 datasets/example_images/camion_s_000599.png create mode 100644 datasets/example_images/canis_familiaris_s_000450.png create mode 100644 datasets/example_images/capreolus_capreolus_s_001283.png create mode 100644 datasets/example_images/capreolus_capreolus_s_001380.png create mode 100644 datasets/example_images/capreolus_capreolus_s_001605.png create mode 100644 datasets/example_images/car_s_000040.png create mode 100644 datasets/example_images/cassowary_s_000194.png create mode 100644 datasets/example_images/cassowary_s_002024.png create mode 100644 datasets/example_images/cervus_elaphus_s_000903.png create mode 100644 datasets/example_images/cervus_elaphus_s_001124.png create mode 100644 datasets/example_images/convertible_s_000295.png create mode 100644 datasets/example_images/convertible_s_000520.png create mode 100644 datasets/example_images/cruiser_s_000163.png create mode 100644 datasets/example_images/dawn_horse_s_001453.png create mode 100644 datasets/example_images/delivery_truck_s_001300.png create mode 100644 datasets/example_images/delivery_truck_s_001587.png create mode 100644 datasets/example_images/domestic_cat_s_000913.png create mode 100644 datasets/example_images/domestic_dog_s_000455.png create mode 100644 datasets/example_images/dredger_s_000486.png create mode 100644 datasets/example_images/dump_truck_s_000163.png create mode 100644 datasets/example_images/dump_truck_s_001097.png create mode 100644 datasets/example_images/dump_truck_s_001363.png create mode 100644 datasets/example_images/dumper_s_000805.png create mode 100644 datasets/example_images/elk_s_001751.png create mode 100644 datasets/example_images/estate_car_s_001092.png create mode 100644 datasets/example_images/fallow_deer_s_000351.png create mode 100644 datasets/example_images/fallow_deer_s_001133.png create mode 100644 datasets/example_images/fallow_deer_s_001785.png create mode 100644 datasets/example_images/fawn_s_001418.png create mode 100644 datasets/example_images/fighter_aircraft_s_000720.png 
create mode 100644 datasets/example_images/fighter_aircraft_s_001009.png create mode 100644 datasets/example_images/garbage_truck_s_001211.png create mode 100644 datasets/example_images/green_frog_s_001384.png create mode 100644 datasets/example_images/house_cat_s_000064.png create mode 100644 datasets/example_images/house_cat_s_002004.png create mode 100644 datasets/example_images/icebreaker_s_001689.png create mode 100644 datasets/example_images/ladder_truck_s_001799.png create mode 100644 datasets/example_images/lapdog_s_001489.png create mode 100644 datasets/example_images/liberty_ship_s_001456.png create mode 100644 datasets/example_images/lipizzan_s_001223.png create mode 100644 datasets/example_images/lorry_s_000562.png create mode 100644 datasets/example_images/male_horse_s_000742.png create mode 100644 datasets/example_images/maltese_s_000562.png create mode 100644 datasets/example_images/monoplane_s_000877.png create mode 100644 datasets/example_images/mouser_s_000792.png create mode 100644 datasets/example_images/mule_deer_s_000357.png create mode 100644 datasets/example_images/mutt_s_000997.png create mode 100644 datasets/example_images/ostrich_s_000026.png create mode 100644 datasets/example_images/ostrich_s_000147.png create mode 100644 datasets/example_images/ostrich_s_001561.png create mode 100644 datasets/example_images/peke_s_000545.png create mode 100644 datasets/example_images/pekinese_s_000046.png create mode 100644 datasets/example_images/police_boat_s_001118.png create mode 100644 datasets/example_images/police_cruiser_s_001385.png create mode 100644 datasets/example_images/puppy_s_001045.png create mode 100644 datasets/example_images/rana_pipiens_s_000379.png create mode 100644 datasets/example_images/red_deer_s_001101.png create mode 100644 datasets/example_images/red_deer_s_001719.png create mode 100644 datasets/example_images/rhea_americana_s_000436.png create mode 100644 datasets/example_images/sika_s_000337.png create mode 100644 datasets/example_images/spring_frog_s_000407.png create mode 100644 datasets/example_images/stallion_s_000015.png create mode 100644 datasets/example_images/station_wagon_s_000464.png create mode 100644 datasets/example_images/station_wagon_s_002537.png create mode 100644 datasets/example_images/supertanker_s_000275.png create mode 100644 datasets/example_images/supertanker_s_000761.png create mode 100644 datasets/example_images/tabby_cat_s_000069.png create mode 100644 datasets/example_images/tabby_cat_s_001983.png create mode 100644 datasets/example_images/tabby_s_001593.png create mode 100644 datasets/example_images/tabby_s_001774.png create mode 100644 datasets/example_images/tailed_frog_s_000246.png create mode 100644 datasets/example_images/toad_frog_s_001786.png create mode 100644 datasets/example_images/tractor_trailer_s_000653.png create mode 100644 datasets/example_images/trailer_truck_s_001350.png create mode 100644 datasets/example_images/truck_s_000028.png create mode 100644 datasets/example_images/trucking_rig_s_001247.png create mode 100644 datasets/example_images/trucking_rig_s_001431.png create mode 100644 datasets/example_images/true_cat_s_000886.png create mode 100644 datasets/example_images/wagon_s_000378.png create mode 100644 datasets/example_images/wagon_s_000572.png create mode 100644 datasets/example_images/wagon_s_002463.png create mode 100644 datasets/example_images/wagtail_s_000747.png create mode 100644 datasets/example_images/walking_horse_s_000071.png create mode 100644 
datasets/example_images/western_toad_s_000622.png create mode 100644 datasets/example_images/wrecker_s_002395.png create mode 100644 examples/basics/Auto-PyTorch Tutorial.ipynb create mode 100644 examples/real_data/openml_task.py diff --git a/.gitignore b/.gitignore index 57bd3c856..bddccd816 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,3 @@ - # Visual Studio *.vs/* @@ -6,8 +5,9 @@ *.vscode/* # Python -*__pycache__/ +*__pycache__* *.pyc +.ipynb_checkpoints* # Zipped *.tar.gz @@ -24,19 +24,24 @@ results.json outputs/ jobs.txt .pylintrc +*worker_logs* # Build *build/ *autonet.egg-info *.simg - - -# Datasets -/datasets/ +.DS_Store +dist/ # Meta GPU *meta_logs/ +runs.log +runs.log.lock +logs/ # ensemble data predictions_for_ensemble.npy test_predictions_for_ensemble.npy + +# testing +tests.ipynb diff --git a/README.md b/README.md index a8916ed5a..90738ea56 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Copyright (C) 2019 [AutoML Group Freiburg](http://www.automl.org/) This a very early pre-alpha version of our upcoming Auto-PyTorch. -So far, Auto-PyTorch only supports featurized data. +So far, Auto-PyTorch only supports featurized data and image data. ## Installation diff --git a/autoPyTorch.egg-info/PKG-INFO b/autoPyTorch.egg-info/PKG-INFO new file mode 100644 index 000000000..4ef8bd634 --- /dev/null +++ b/autoPyTorch.egg-info/PKG-INFO @@ -0,0 +1,17 @@ +Metadata-Version: 1.2 +Name: autoPyTorch +Version: 0.0.2 +Summary: Auto-PyTorch searches neural architectures using BO-HB +Home-page: UNKNOWN +Author: AutoML Freiburg +Author-email: urbanm@informatik.uni-freiburg.de +License: 3-clause BSD +Description: UNKNOWN +Keywords: machine learning algorithm configuration hyperparameter optimization tuning neural architecture deep learning +Platform: Linux +Classifier: Development Status :: 3 - Alpha +Classifier: Topic :: Utilities +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: License :: OSI Approved :: 3-clause BSD +Requires-Python: >=3 diff --git a/autoPyTorch.egg-info/SOURCES.txt b/autoPyTorch.egg-info/SOURCES.txt new file mode 100644 index 000000000..54a06ad13 --- /dev/null +++ b/autoPyTorch.egg-info/SOURCES.txt @@ -0,0 +1,237 @@ +README.md +setup.py +autoPyTorch/__init__.py +autoPyTorch.egg-info/PKG-INFO +autoPyTorch.egg-info/SOURCES.txt +autoPyTorch.egg-info/dependency_links.txt +autoPyTorch.egg-info/requires.txt +autoPyTorch.egg-info/top_level.txt +autoPyTorch/components/__init__.py +autoPyTorch/components/ensembles/__init__.py +autoPyTorch/components/ensembles/abstract_ensemble.py +autoPyTorch/components/ensembles/ensemble_selection.py +autoPyTorch/components/lr_scheduler/__init__.py +autoPyTorch/components/lr_scheduler/lr_schedulers.py +autoPyTorch/components/metrics/__init__.py +autoPyTorch/components/metrics/additional_logs.py +autoPyTorch/components/metrics/balanced_accuracy.py +autoPyTorch/components/metrics/pac_score.py +autoPyTorch/components/metrics/standard_metrics.py +autoPyTorch/components/networks/__init__.py +autoPyTorch/components/networks/activations.py +autoPyTorch/components/networks/base_net.py +autoPyTorch/components/networks/initialization.py +autoPyTorch/components/networks/feature/__init__.py +autoPyTorch/components/networks/feature/embedding.py +autoPyTorch/components/networks/feature/mlpnet.py +autoPyTorch/components/networks/feature/resnet.py +autoPyTorch/components/networks/feature/shapedmlpnet.py +autoPyTorch/components/networks/feature/shapedresnet.py 
+autoPyTorch/components/networks/image/__init__.py +autoPyTorch/components/networks/image/convnet.py +autoPyTorch/components/networks/image/densenet.py +autoPyTorch/components/networks/image/densenet_flexible.py +autoPyTorch/components/networks/image/mobilenet.py +autoPyTorch/components/networks/image/resnet.py +autoPyTorch/components/networks/image/resnet152.py +autoPyTorch/components/networks/image/darts/__init__.py +autoPyTorch/components/networks/image/darts/darts_worker.py +autoPyTorch/components/networks/image/darts/genotypes.py +autoPyTorch/components/networks/image/darts/model.py +autoPyTorch/components/networks/image/darts/operations.py +autoPyTorch/components/networks/image/darts/utils.py +autoPyTorch/components/networks/image/utils/__init__.py +autoPyTorch/components/networks/image/utils/conv2d_helpers.py +autoPyTorch/components/networks/image/utils/mobilenet_utils.py +autoPyTorch/components/networks/image/utils/shakedrop.py +autoPyTorch/components/networks/image/utils/shakeshakeblock.py +autoPyTorch/components/networks/image/utils/utils.py +autoPyTorch/components/optimizer/__init__.py +autoPyTorch/components/optimizer/optimizer.py +autoPyTorch/components/preprocessing/__init__.py +autoPyTorch/components/preprocessing/loss_weight_strategies.py +autoPyTorch/components/preprocessing/preprocessor_base.py +autoPyTorch/components/preprocessing/resampling_base.py +autoPyTorch/components/preprocessing/feature_preprocessing/__init__.py +autoPyTorch/components/preprocessing/feature_preprocessing/fast_ica.py +autoPyTorch/components/preprocessing/feature_preprocessing/kernel_pca.py +autoPyTorch/components/preprocessing/feature_preprocessing/kitchen_sinks.py +autoPyTorch/components/preprocessing/feature_preprocessing/nystroem.py +autoPyTorch/components/preprocessing/feature_preprocessing/polynomial_features.py +autoPyTorch/components/preprocessing/feature_preprocessing/power_transformer.py +autoPyTorch/components/preprocessing/feature_preprocessing/truncated_svd.py +autoPyTorch/components/preprocessing/image_preprocessing/__init__.py +autoPyTorch/components/preprocessing/image_preprocessing/archive.py +autoPyTorch/components/preprocessing/image_preprocessing/augmentation_transforms.py +autoPyTorch/components/preprocessing/image_preprocessing/operations.py +autoPyTorch/components/preprocessing/image_preprocessing/transforms.py +autoPyTorch/components/preprocessing/resampling/__init__.py +autoPyTorch/components/preprocessing/resampling/random.py +autoPyTorch/components/preprocessing/resampling/smote.py +autoPyTorch/components/preprocessing/resampling/target_size_strategies.py +autoPyTorch/components/regularization/__init__.py +autoPyTorch/components/regularization/mixup.py +autoPyTorch/components/regularization/shake.py +autoPyTorch/components/training/__init__.py +autoPyTorch/components/training/base_training.py +autoPyTorch/components/training/budget_types.py +autoPyTorch/components/training/early_stopping.py +autoPyTorch/components/training/lr_scheduling.py +autoPyTorch/components/training/trainer.py +autoPyTorch/components/training/image/__init__.py +autoPyTorch/components/training/image/base_training.py +autoPyTorch/components/training/image/budget_types.py +autoPyTorch/components/training/image/early_stopping.py +autoPyTorch/components/training/image/lr_scheduling.py +autoPyTorch/components/training/image/mixup.py +autoPyTorch/components/training/image/trainer.py +autoPyTorch/components/training/image/checkpoints/__init__.py 
+autoPyTorch/components/training/image/checkpoints/load_specific.py +autoPyTorch/components/training/image/checkpoints/save_load.py +autoPyTorch/core/__init__.py +autoPyTorch/core/api.py +autoPyTorch/core/ensemble.py +autoPyTorch/core/worker.py +autoPyTorch/core/worker_no_timelimit.py +autoPyTorch/core/autonet_classes/__init__.py +autoPyTorch/core/autonet_classes/autonet_feature_classification.py +autoPyTorch/core/autonet_classes/autonet_feature_data.py +autoPyTorch/core/autonet_classes/autonet_feature_multilabel.py +autoPyTorch/core/autonet_classes/autonet_feature_regression.py +autoPyTorch/core/autonet_classes/autonet_image_classification.py +autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py +autoPyTorch/core/autonet_classes/autonet_image_data.py +autoPyTorch/core/hpbandster_extensions/__init__.py +autoPyTorch/core/hpbandster_extensions/bohb_ext.py +autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py +autoPyTorch/core/hpbandster_extensions/hyperband_ext.py +autoPyTorch/core/hpbandster_extensions/run_with_time.py +autoPyTorch/core/presets/__init__.py +autoPyTorch/core/presets/tiny_cs_updates.txt +autoPyTorch/core/presets/feature_classification/__init__.py +autoPyTorch/core/presets/feature_classification/full_cs.txt +autoPyTorch/core/presets/feature_classification/medium_cs.txt +autoPyTorch/core/presets/feature_classification/tiny_cs.txt +autoPyTorch/core/presets/feature_multilabel/__init__.py +autoPyTorch/core/presets/feature_multilabel/full_cs.txt +autoPyTorch/core/presets/feature_multilabel/medium_cs.txt +autoPyTorch/core/presets/feature_multilabel/tiny_cs.txt +autoPyTorch/core/presets/feature_regression/__init__.py +autoPyTorch/core/presets/feature_regression/full_cs.txt +autoPyTorch/core/presets/feature_regression/medium_cs.txt +autoPyTorch/core/presets/feature_regression/tiny_cs.txt +autoPyTorch/core/presets/image_classification/__init__.py +autoPyTorch/core/presets/image_classification/full_cs.txt +autoPyTorch/core/presets/image_classification/medium_cs.txt +autoPyTorch/core/presets/image_classification/tiny_cs.txt +autoPyTorch/core/presets/image_classification_multiple_datasets/__init__.py +autoPyTorch/core/presets/image_classification_multiple_datasets/full_cs.txt +autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt +autoPyTorch/core/presets/image_classification_multiple_datasets/tiny_cs.txt +autoPyTorch/data_management/__init__.py +autoPyTorch/data_management/data_converter.py +autoPyTorch/data_management/data_loader.py +autoPyTorch/data_management/data_manager.py +autoPyTorch/data_management/data_reader.py +autoPyTorch/data_management/image_loader.py +autoPyTorch/pipeline/__init__.py +autoPyTorch/pipeline/base/__init__.py +autoPyTorch/pipeline/base/node.py +autoPyTorch/pipeline/base/pipeline.py +autoPyTorch/pipeline/base/pipeline_node.py +autoPyTorch/pipeline/base/sub_pipeline_node.py +autoPyTorch/pipeline/nodes/__init__.py +autoPyTorch/pipeline/nodes/autonet_settings.py +autoPyTorch/pipeline/nodes/create_dataloader.py +autoPyTorch/pipeline/nodes/create_dataset_info.py +autoPyTorch/pipeline/nodes/cross_validation.py +autoPyTorch/pipeline/nodes/embedding_selector.py +autoPyTorch/pipeline/nodes/ensemble.py +autoPyTorch/pipeline/nodes/imputation.py +autoPyTorch/pipeline/nodes/initialization_selector.py +autoPyTorch/pipeline/nodes/log_functions_selector.py +autoPyTorch/pipeline/nodes/loss_module_selector.py +autoPyTorch/pipeline/nodes/lr_scheduler_selector.py +autoPyTorch/pipeline/nodes/metric_selector.py 
+autoPyTorch/pipeline/nodes/network_selector.py +autoPyTorch/pipeline/nodes/normalization_strategy_selector.py +autoPyTorch/pipeline/nodes/one_hot_encoding.py +autoPyTorch/pipeline/nodes/optimization_algorithm.py +autoPyTorch/pipeline/nodes/optimizer_selector.py +autoPyTorch/pipeline/nodes/preprocessor_selector.py +autoPyTorch/pipeline/nodes/resampling_strategy_selector.py +autoPyTorch/pipeline/nodes/train_node.py +autoPyTorch/pipeline/nodes/image/__init__.py +autoPyTorch/pipeline/nodes/image/autonet_settings_no_shuffle.py +autoPyTorch/pipeline/nodes/image/create_dataset_info.py +autoPyTorch/pipeline/nodes/image/create_image_dataloader.py +autoPyTorch/pipeline/nodes/image/cross_validation_indices.py +autoPyTorch/pipeline/nodes/image/image_augmentation.py +autoPyTorch/pipeline/nodes/image/image_dataset_reader.py +autoPyTorch/pipeline/nodes/image/loss_module_selector_indices.py +autoPyTorch/pipeline/nodes/image/multiple_datasets.py +autoPyTorch/pipeline/nodes/image/network_selector_datasetinfo.py +autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py +autoPyTorch/pipeline/nodes/image/simple_scheduler_selector.py +autoPyTorch/pipeline/nodes/image/simple_train_node.py +autoPyTorch/pipeline/nodes/image/single_dataset.py +autoPyTorch/utils/__init__.py +autoPyTorch/utils/config_space_hyperparameter.py +autoPyTorch/utils/configspace_wrapper.py +autoPyTorch/utils/ensemble.py +autoPyTorch/utils/hyperparameter_search_space_update.py +autoPyTorch/utils/loggers.py +autoPyTorch/utils/mem_test_thread.py +autoPyTorch/utils/modify_config_space.py +autoPyTorch/utils/modules.py +autoPyTorch/utils/thread_read_write.py +autoPyTorch/utils/benchmarking/__init__.py +autoPyTorch/utils/benchmarking/benchmark.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/__init__.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/apply_user_updates.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/benchmark_settings.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/create_autonet.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/fit_autonet.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/for_instance.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/for_run.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/prepare_result_folder.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/read_instance_data.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/save_ensemble_logs.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/save_results.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/set_autonet_config.py +autoPyTorch/utils/benchmarking/benchmark_pipeline/set_ensemble_config.py +autoPyTorch/utils/benchmarking/visualization_pipeline/__init__.py +autoPyTorch/utils/benchmarking/visualization_pipeline/collect_trajectories.py +autoPyTorch/utils/benchmarking/visualization_pipeline/get_additional_trajectories.py +autoPyTorch/utils/benchmarking/visualization_pipeline/get_ensemble_trajectories.py +autoPyTorch/utils/benchmarking/visualization_pipeline/get_run_trajectories.py +autoPyTorch/utils/benchmarking/visualization_pipeline/plot_summary.py +autoPyTorch/utils/benchmarking/visualization_pipeline/plot_trajectories.py +autoPyTorch/utils/benchmarking/visualization_pipeline/read_instance_info.py +autoPyTorch/utils/benchmarking/visualization_pipeline/visualization_settings.py +autoPyTorch/utils/config/__init__.py +autoPyTorch/utils/config/config_condition.py +autoPyTorch/utils/config/config_file_parser.py 
+autoPyTorch/utils/config/config_option.py +examples/__init__.py +examples/basics/__init__.py +examples/basics/autonet_tutorial.py +examples/basics/classification.py +examples/basics/ensemble.py +examples/basics/modify_pipeline.py +examples/basics/regression.py +test/__init__.py +test/test_pipeline/__init__.py +test/test_pipeline/test_cross_validation.py +test/test_pipeline/test_imputation.py +test/test_pipeline/test_initialization.py +test/test_pipeline/test_log_selector.py +test/test_pipeline/test_loss_selector.py +test/test_pipeline/test_lr_scheduler_selector.py +test/test_pipeline/test_metric_selector.py +test/test_pipeline/test_network_selector.py +test/test_pipeline/test_normalization_strategy_selector.py +test/test_pipeline/test_optimization_algorithm.py +test/test_pipeline/test_optimizer_selector.py +test/test_pipeline/test_resampling_strategy_selector.py \ No newline at end of file diff --git a/autoPyTorch.egg-info/dependency_links.txt b/autoPyTorch.egg-info/dependency_links.txt new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/autoPyTorch.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/autoPyTorch.egg-info/requires.txt b/autoPyTorch.egg-info/requires.txt new file mode 100644 index 000000000..34c062db2 --- /dev/null +++ b/autoPyTorch.egg-info/requires.txt @@ -0,0 +1,16 @@ +setuptools +Cython +netifaces +numpy +pandas +scipy +statsmodels +scikit-learn>=0.20.0 +imblearn +ConfigSpace +pynisher +hpbandster +fasteners +torch +torchvision +tensorboard_logger diff --git a/autoPyTorch.egg-info/top_level.txt b/autoPyTorch.egg-info/top_level.txt new file mode 100644 index 000000000..d7d64695f --- /dev/null +++ b/autoPyTorch.egg-info/top_level.txt @@ -0,0 +1,3 @@ +autoPyTorch +examples +test diff --git a/autoPyTorch/__init__.py b/autoPyTorch/__init__.py index 05b6be868..3e5048fdc 100644 --- a/autoPyTorch/__init__.py +++ b/autoPyTorch/__init__.py @@ -2,7 +2,7 @@ hpbandster = os.path.abspath(os.path.join(__file__, '..', '..', 'submodules', 'HpBandSter')) sys.path.append(hpbandster) -from autoPyTorch.core.autonet_classes import AutoNetClassification, AutoNetMultilabel, AutoNetRegression +from autoPyTorch.core.autonet_classes import AutoNetClassification, AutoNetMultilabel, AutoNetRegression, AutoNetImageClassification, AutoNetImageClassificationMultipleDatasets from autoPyTorch.data_management.data_manager import DataManager from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates from autoPyTorch.core.ensemble import AutoNetEnsemble diff --git a/autoPyTorch/components/lr_scheduler/lr_schedulers.py b/autoPyTorch/components/lr_scheduler/lr_schedulers.py index 91b98f97f..434f28a51 100644 --- a/autoPyTorch/components/lr_scheduler/lr_schedulers.py +++ b/autoPyTorch/components/lr_scheduler/lr_schedulers.py @@ -6,8 +6,11 @@ from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter +import numpy as np +import math import torch import torch.optim.lr_scheduler as lr_scheduler +from torch.optim import Optimizer import ConfigSpace as CS import ConfigSpace.hyperparameters as CSH @@ -16,6 +19,7 @@ __version__ = "0.0.1" __license__ = "BSD" + class AutoNetLearningRateSchedulerBase(object): def __new__(cls, optimizer, config): """Get a new instance of the scheduler @@ -42,12 +46,17 @@ def _get_scheduler(self, optimizer, config): def get_config_space(): return CS.ConfigurationSpace() + class SchedulerNone(AutoNetLearningRateSchedulerBase): def _get_scheduler(self, optimizer, config): return 
NoScheduling(optimizer=optimizer) + class SchedulerStepLR(AutoNetLearningRateSchedulerBase): + """ + Step learning rate scheduler + """ def _get_scheduler(self, optimizer, config): return lr_scheduler.StepLR(optimizer=optimizer, step_size=config['step_size'], gamma=config['gamma'], last_epoch=-1) @@ -62,8 +71,12 @@ def get_config_space( add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'gamma', gamma) return cs + class SchedulerExponentialLR(AutoNetLearningRateSchedulerBase): - + """ + Exponential learning rate scheduler + """ + def _get_scheduler(self, optimizer, config): return lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=config['gamma'], last_epoch=-1) @@ -75,11 +88,17 @@ def get_config_space( add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'gamma', gamma) return cs + class SchedulerReduceLROnPlateau(AutoNetLearningRateSchedulerBase): + """ + Reduce LR on plateau learning rate scheduler + """ def _get_scheduler(self, optimizer, config): - return lr_scheduler.ReduceLROnPlateau(optimizer=optimizer) - + return lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, + factor=config['factor'], + patience=config['patience']) + @staticmethod def get_config_space( factor=(0.05, 0.5), @@ -90,7 +109,112 @@ def get_config_space( add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'patience', patience) return cs + +class SchedulerAdaptiveLR(AutoNetLearningRateSchedulerBase): + """ + Adaptive cosine learning rate scheduler + """ + + def _get_scheduler(self, optimizer, config): + return AdaptiveLR(optimizer=optimizer, + T_max=config['T_max'], + T_mul=config['T_mult'], + patience=config['patience'], + threshold=config['threshold']) + + @staticmethod + def get_config_space( + T_max=(300,1000), + patience=(2,5), + T_mult=(1.0,2.0), + threshold=(0.001, 0.5) + ): + cs = CS.ConfigurationSpace() + add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max) + add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'patience', patience) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'threshold', threshold) + return cs + + +class AdaptiveLR(object): + + def __init__(self, optimizer, mode='min', T_max=30, T_mul=2.0, eta_min=0, patience=3, threshold=0.1, min_lr=0, eps=1e-8, last_epoch=-1): + + if not isinstance(optimizer, Optimizer): + raise TypeError('{} is not an Optimizer'.format( + type(optimizer).__name__)) + + self.optimizer = optimizer + + if last_epoch == -1: + for group in optimizer.param_groups: + group.setdefault('initial_lr', group['lr']) + else: + for i, group in enumerate(optimizer.param_groups): + if 'initial_lr' not in group: + raise KeyError("param 'initial_lr' is not specified " + "in param_groups[{}] when resuming an optimizer".format(i)) + + self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups)) + self.last_epoch = last_epoch + + if isinstance(min_lr, list) or isinstance(min_lr, tuple): + if len(min_lr) != len(optimizer.param_groups): + raise ValueError("expected {} min_lrs, got {}".format( + len(optimizer.param_groups), len(min_lr))) + self.min_lrs = list(min_lr) + else: + self.min_lrs = [min_lr] * len(optimizer.param_groups) + + self.T_max = T_max + self.T_mul = T_mul + self.eta_min = eta_min + self.current_base_lrs = self.base_lrs + self.metric_values = [] + self.threshold = threshold + self.patience = patience + self.steps = 0 + + def step(self, metrics, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + self.last_epoch = 
epoch + + self.metric_values.append(metrics) + if len(self.metric_values) > self.patience: + self.metric_values = self.metric_values[1:] + + if max(self.metric_values) - metrics > self.threshold: + self.current_base_lrs = self.get_lr() + self.steps = 0 + else: + self.steps += 1 + + self.last_metric_value = metrics + + for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): + param_group['lr'] = lr + + def get_lr(self): + ''' + Override this method to the existing get_lr() of the parent class + ''' + if self.steps >= self.T_max: + self.T_max = self.T_max * self.T_mul + self.current_base_lrs = self.base_lrs + self.metric_values = [] + self.steps = 0 + + return [self.eta_min + (base_lr - self.eta_min) * + (1 + math.cos(math.pi * self.steps / self.T_max)) / 2 + for base_lr in self.current_base_lrs] + + class SchedulerCyclicLR(AutoNetLearningRateSchedulerBase): + """ + Cyclic learning rate scheduler + """ def _get_scheduler(self, optimizer, config): maf = config['max_factor'] @@ -118,7 +242,11 @@ def get_config_space( add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'cycle_length', cycle_length) return cs + class SchedulerCosineAnnealingWithRestartsLR(AutoNetLearningRateSchedulerBase): + """ + Cosine annealing learning rate scheduler with warm restarts + """ def _get_scheduler(self, optimizer, config): scheduler = CosineAnnealingWithRestartsLR(optimizer, T_max=config['T_max'], T_mult=config['T_mult'],last_epoch=-1) @@ -151,7 +279,6 @@ def get_lr(self): return [None] -import math class CosineAnnealingWithRestartsLR(torch.optim.lr_scheduler._LRScheduler): r"""Copyright: pytorch @@ -205,3 +332,62 @@ def get_lr(self): if self.step_n >= self.restart_every: self.restart() return [self.cosine(base_lr) for base_lr in self.base_lrs] + + def needs_checkpoint(self): + return self.step_n + 1 >= self.restart_every + + +class SchedulerAlternatingCosineLR(AutoNetLearningRateSchedulerBase): + """ + Alternating cosine learning rate scheduler + """ + + def _get_scheduler(self, optimizer, config): + scheduler = AlternatingCosineLR(optimizer, T_max=config['T_max'], T_mul=config['T_mult'], amplitude_reduction=config['amp_reduction'], last_epoch=-1) + return scheduler + + @staticmethod + def get_config_space( + T_max=(1, 20), + T_mult=(1.0, 2.0), + amp_reduction=(0.1,1) + ): + cs = CS.ConfigurationSpace() + add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'amp_reduction', amp_reduction) + return cs + + +class AlternatingCosineLR(torch.optim.lr_scheduler._LRScheduler): + def __init__(self, optimizer, T_max, T_mul=1, amplitude_reduction=0.9, eta_min=0, last_epoch=-1): + ''' + Here last_epoch actually means last_step since the + learning rate is decayed after each batch step. 
+ ''' + + self.T_max = T_max + self.T_mul = T_mul + self.eta_min = eta_min + self.cumulative_time = 0 + self.amplitude_mult = amplitude_reduction + self.base_lr_mult = 1 + self.frequency_mult = 1 + self.time_offset = 0 + self.last_step = 0 + super(AlternatingCosineLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + ''' + Override this method to the existing get_lr() of the parent class + ''' + if self.last_epoch >= self.T_max: + self.T_max = self.T_max * self.T_mul + self.time_offset = self.T_max / 2 + self.last_epoch = 0 + self.base_lr_mult *= self.amplitude_mult + self.frequency_mult = 2 + self.cumulative_time = 0 + return [self.eta_min + (base_lr * self.base_lr_mult - self.eta_min) * + (1 + math.cos(math.pi * (self.time_offset + self.cumulative_time) / self.T_max * self.frequency_mult)) / 2 + for base_lr in self.base_lrs] diff --git a/autoPyTorch/components/metrics/__init__.py b/autoPyTorch/components/metrics/__init__.py index aee6098dd..cba08f437 100644 --- a/autoPyTorch/components/metrics/__init__.py +++ b/autoPyTorch/components/metrics/__init__.py @@ -1,3 +1,4 @@ from autoPyTorch.components.metrics.balanced_accuracy import balanced_accuracy from autoPyTorch.components.metrics.pac_score import pac_metric -from autoPyTorch.components.metrics.standard_metrics import accuracy, auc_metric, mean_distance, multilabel_accuracy \ No newline at end of file +from autoPyTorch.components.metrics.standard_metrics import accuracy, auc_metric, mean_distance, multilabel_accuracy +from autoPyTorch.components.metrics.standard_metrics import top1, top3, top5 diff --git a/autoPyTorch/components/metrics/standard_metrics.py b/autoPyTorch/components/metrics/standard_metrics.py index e18f01fa4..f711c24a1 100644 --- a/autoPyTorch/components/metrics/standard_metrics.py +++ b/autoPyTorch/components/metrics/standard_metrics.py @@ -2,20 +2,41 @@ import numpy as np # classification metrics - - def accuracy(y_true, y_pred): return np.mean(y_true == y_pred) - def auc_metric(y_true, y_pred): return (2 * metrics.roc_auc_score(y_true, y_pred) - 1) +def top1(y_pred, y_true): + return topN(y_pred, y_true, 1) + +def top3(y_pred, y_true): + return topN(y_pred, y_true, 3) + +def top5(y_pred, y_true): + if y_pred.shape[1] < 5: + return -1 + return topN(y_pred, y_true, 5) + +def topN(output, target, topk): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + batch_size = target.size(0) -# multilabel metric + _, pred = output.topk(topk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + correct_k = correct[:topk].view(-1).float().sum(0, keepdim=True) + return correct_k.mul_(100.0 / batch_size).item() + + +# multilabel metrics def multilabel_accuracy(y_true, y_pred): return np.mean(y_true == (y_pred > 0.5)) -# regression metric + +# regression metrics def mean_distance(y_true, y_pred): return np.mean(np.abs(y_true - y_pred)) diff --git a/autoPyTorch/components/networks/base_net.py b/autoPyTorch/components/networks/base_net.py index 3fcfbc236..154dc9af9 100644 --- a/autoPyTorch/components/networks/base_net.py +++ b/autoPyTorch/components/networks/base_net.py @@ -64,4 +64,18 @@ def __init__(self, config, in_features, out_features, embedding, final_activatio def forward(self, x): x = self.embedding(x) - return super(BaseFeatureNet, self).forward(x) \ No newline at end of file + return super(BaseFeatureNet, self).forward(x) + + +class BaseImageNet(BaseNet): + def __init__(self, config, in_features, out_features, 
final_activation): + super(BaseImageNet, self).__init__(config, in_features, out_features, final_activation) + + if len(in_features) == 2: + self.channels = 1 + self.iw = in_features[0] + self.ih = in_features[1] + if len(in_features) == 3: + self.channels = in_features[0] + self.iw = in_features[1] + self.ih = in_features[2] diff --git a/autoPyTorch/components/networks/image/__init__.py b/autoPyTorch/components/networks/image/__init__.py new file mode 100644 index 000000000..9a0900524 --- /dev/null +++ b/autoPyTorch/components/networks/image/__init__.py @@ -0,0 +1,4 @@ +from autoPyTorch.components.networks.image.convnet import ConvNet +from autoPyTorch.components.networks.image.densenet import DenseNet +from autoPyTorch.components.networks.image.resnet import ResNet +from autoPyTorch.components.networks.image.mobilenet import MobileNet diff --git a/autoPyTorch/components/networks/image/convnet.py b/autoPyTorch/components/networks/image/convnet.py new file mode 100644 index 000000000..8b3344460 --- /dev/null +++ b/autoPyTorch/components/networks/image/convnet.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Basic Implementation of a convolutional network. +""" + +from __future__ import division, print_function + +import ConfigSpace as CS +import ConfigSpace.hyperparameters as CSH +import torch.nn as nn + +from autoPyTorch.components.networks.base_net import BaseImageNet + +__author__ = "Max Dippel, Michael Burkart and Matthias Urban" +__version__ = "0.0.1" +__license__ = "BSD" + + +class ConvNet(BaseImageNet): + def __init__(self, config, in_features, out_features, final_activation, *args, **kwargs): + super(ConvNet, self).__init__(config, in_features, out_features, final_activation) + self.layers = self._build_net(self.n_classes) + + + def forward(self, x): + x = self.layers(x) + x = x.reshape(x.size(0), -1) + x = self.last_layer(x) + if not self.training and self.final_activation is not None: + x = self.final_activation(x) + return x + + def _build_net(self, out_features): + layers = list() + init_filter = self.config["conv_init_filters"] + self._add_layer(layers, self.channels, init_filter, 1) + + cw, ch = self._get_layer_size(self.iw, self.ih) + self.dense_size = init_filter * cw * ch + print(cw, ch, self.dense_size) + for i in range(2, self.config["num_layers"]+1): + cw, ch = self._get_layer_size(cw, ch) + if cw == 0 or ch == 0: + print("> reduce network size due to too small layers.") + break + self._add_layer(layers, init_filter, init_filter * 2, i) + init_filter *= 2 + self.dense_size = init_filter * cw * ch + print(cw, ch, self.dense_size) + + self.last_layer = nn.Linear(self.dense_size, out_features) + nw = nn.Sequential(*layers) + #print(nw) + return nw + + def _get_layer_size(self, w, h): + cw = ((w - self.config["conv_kernel_size"] + 2 * self.config["conv_kernel_padding"]) + //self.config["conv_kernel_stride"]) + 1 + ch = ((h - self.config["conv_kernel_size"] + 2 * self.config["conv_kernel_padding"]) + //self.config["conv_kernel_stride"]) + 1 + cw, ch = cw // self.config["pool_size"], ch // self.config["pool_size"] + return cw, ch + + def _add_layer(self, layers, in_filters, out_filters, layer_id): + layers.append(nn.Conv2d(in_filters, out_filters, + kernel_size=self.config["conv_kernel_size"], + stride=self.config["conv_kernel_stride"], + padding=self.config["conv_kernel_padding"])) + layers.append(nn.BatchNorm2d(out_filters)) + layers.append(self.activation()) + layers.append(nn.MaxPool2d(kernel_size=self.config["pool_size"], 
stride=self.config["pool_size"])) + + @staticmethod + def get_config_space(user_updates=None): + cs = CS.ConfigurationSpace() + + cs.add_hyperparameter(CSH.CategoricalHyperparameter('activation', ['relu'])) #'sigmoid', 'tanh', + num_layers = CSH.UniformIntegerHyperparameter('num_layers', lower=2, upper=5) + cs.add_hyperparameter(num_layers) + cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('conv_init_filters', lower=16, upper=64)) + cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('conv_kernel_size', lower=2, upper=5)) + cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('conv_kernel_stride', lower=1, upper=3)) + cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('conv_kernel_padding', lower=2, upper=3)) + cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('pool_size', lower=2, upper=3)) + + return(cs) diff --git a/autoPyTorch/components/networks/image/darts/__init__.py b/autoPyTorch/components/networks/image/darts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/components/networks/image/darts/darts_worker.py b/autoPyTorch/components/networks/image/darts/darts_worker.py new file mode 100644 index 000000000..c4c4b882c --- /dev/null +++ b/autoPyTorch/components/networks/image/darts/darts_worker.py @@ -0,0 +1,114 @@ +import os +import time +import argparse +#from copy import copy, deepcopy + +import ConfigSpace as CS +import ConfigSpace.hyperparameters as CSH +from hpbandster.core.worker import Worker +# from .helper import darts_cifar10 + + +PRIMITIVES = [ + #'none', + 'max_pool_3x3', + 'avg_pool_3x3', + 'skip_connect', + 'sep_conv_3x3', + 'sep_conv_5x5', + 'dil_conv_3x3', + 'dil_conv_5x5', +] + + +class DARTSWorker(Worker): + # def __init__(self, *args, **kwargs): + # super().__init__(*args, **kwargs) + # #self.darts_mainsourcepath = '/home/zelaa/Thesis/bohb-darts/workers/lib' + # self.darts_path = os.getcwd() + '/workers/lib/darts_space' + + # def compute(self, config, budget, config_id, working_directory): + # return darts_cifar10(config=config, + # budget=int(budget), + # config_id=config_id, + # directory=working_directory, + # darts_source=self.darts_path) + + @staticmethod + def get_config_space(): + config_space = CS.ConfigurationSpace() + + # here we instantiate one categorical hyperparameter for each edge in + # the DARTS cell + for i in range(14): + config_space.add_hyperparameter(CSH.CategoricalHyperparameter('edge_normal_{}'.format(i), + PRIMITIVES)) + config_space.add_hyperparameter(CSH.CategoricalHyperparameter('edge_reduce_{}'.format(i), + PRIMITIVES)) + # for the intermediate node 2 we add directly the two incoming edges to + # the config_space. All nodes are topologicaly sorted and the labels 0 + # and 1 correspond to the 2 input nodes of the cell. nodes 2, 3, 4, 5 + # are intermediate nodes. 
We define below a CategoricalHyperparameter + # for nodes 3, 4, 5 with each category representing two possible + # predecesor nodes indices (for node 2 there is only one possibility) + pred_nodes = {'3': ['0_1', '0_2', '1_2'], + '4': ['0_1', '0_2', '0_3', '1_2', '1_3', '2_3'], + '5': ['0_1', '0_2', '0_3', '0_4', '1_2', '1_3', '1_4', + '2_3', '2_4', '3_4'] + } + + for i in range(3, 6): + config_space.add_hyperparameter(CSH.CategoricalHyperparameter('inputs_node_normal_{}'.format(i), + pred_nodes[str(i)])) + config_space.add_hyperparameter(CSH.CategoricalHyperparameter('inputs_node_reduce_{}'.format(i), + pred_nodes[str(i)])) + + config_space.add_hyperparameter(CSH.Constant('layers', 20)) + config_space.add_hyperparameter(CSH.Constant('init_channels', 36)) + config_space.add_hyperparameter(CSH.Constant('drop_path_prob', 0.1)) + config_space.add_hyperparameter(CSH.CategoricalHyperparameter('auxiliary', [False])) + + # now we define the conditions constraining the inclusion of the edges + # on the optimization in order to be consistent with the DARTS original + # search space + for cell_type in ['normal', 'reduce']: + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_2'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_3'.format(cell_type)), + values=['0_1', '0_2'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_3'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_3'.format(cell_type)), + values=['0_1', '1_2'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_4'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_3'.format(cell_type)), + values=['0_2', '1_2'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_5'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_4'.format(cell_type)), + values=['0_1', '0_2', '0_3'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_6'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_4'.format(cell_type)), + values=['0_1', '1_2', '1_3'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_7'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_4'.format(cell_type)), + values=['0_2', '1_2', '2_3'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_8'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_4'.format(cell_type)), + values=['0_3', '1_3', '2_3'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_9'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_5'.format(cell_type)), + values=['0_1', '0_2', '0_3', '0_4'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_10'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_5'.format(cell_type)), + values=['0_1', '1_2', '1_3', '1_4'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_11'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_5'.format(cell_type)), + values=['0_2', '1_2', '2_3', '2_4'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_12'.format(cell_type)), + 
parent=config_space.get_hyperparameter('inputs_node_{}_5'.format(cell_type)), + values=['0_3', '1_3', '2_3', '3_4'])) + config_space.add_condition(CS.InCondition(child=config_space.get_hyperparameter('edge_{}_13'.format(cell_type)), + parent=config_space.get_hyperparameter('inputs_node_{}_5'.format(cell_type)), + values=['0_4', '1_4', '2_4', '3_4'])) + + return config_space + + diff --git a/autoPyTorch/components/networks/image/darts/genotypes.py b/autoPyTorch/components/networks/image/darts/genotypes.py new file mode 100644 index 000000000..2a46099fc --- /dev/null +++ b/autoPyTorch/components/networks/image/darts/genotypes.py @@ -0,0 +1,98 @@ +from functools import wraps +from collections import namedtuple +import random +import sys + +Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat') + +PRIMITIVES = [ + #'none', + 'max_pool_3x3', + 'avg_pool_3x3', + 'skip_connect', + 'sep_conv_3x3', + 'sep_conv_5x5', + 'dil_conv_3x3', + 'dil_conv_5x5' +] + +DARTS = Genotype(normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), + ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), + ('sep_conv_3x3', 1), ('skip_connect', 0), + ('skip_connect', 0), ('dil_conv_3x3', 2)], + normal_concat=[2, 3, 4, 5], reduce=[('max_pool_3x3', 0), + ('max_pool_3x3', 1), + ('skip_connect', 2), + ('max_pool_3x3', 1), + ('max_pool_3x3', 0), + ('skip_connect', 2), + ('skip_connect', 2), + ('max_pool_3x3', 1)], + reduce_concat=[2, 3, 4, 5]) + + +def generate_genotype(gene_function): + @wraps(gene_function) + def wrapper(config=None, steps=4): + concat = range(2, 6) + gene_normal, gene_reduce = gene_function(config, steps).values() + genotype = Genotype( + normal=gene_normal, normal_concat=concat, + reduce=gene_reduce, reduce_concat=concat + ) + return genotype + return wrapper + + +@generate_genotype +def get_gene_from_config(config, steps=4): + gene = {'normal': [], 'reduce': []} + + # node 2 + for cell_type in gene.keys(): + first_edge = (config['edge_{}_0'.format(cell_type)], 0) + second_edge = (config['edge_{}_1'.format(cell_type)], 1) + gene[cell_type].append(first_edge) + gene[cell_type].append(second_edge) + + # nodes 3, 4, 5 + for i, offset in zip(range(3, steps+2), [2, 5, 9]): + for cell_type in gene.keys(): + input_nodes = config['inputs_node_{}_{}'.format(cell_type, i)].split('_') + for node in input_nodes: + edge = (config['edge_{}_{}'.format(cell_type, int(node)+offset)], + int(node)) + gene[cell_type].append(edge) + return gene + + +@generate_genotype +def random_gene(config=None, steps=4): + gene = {'normal': [], 'reduce': []} + + n = 1 + for i in range(steps): + for cell_type in gene.keys(): + first_edge = (random.choice(PRIMITIVES), + random.randint(0, n)) + second_edge = (random.choice(PRIMITIVES), + random.randint(0, n)) + + gene[cell_type].append(first_edge) + gene[cell_type].append(second_edge) + n += 1 + return gene + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print("usage:\n python {} CONFIGS".format(sys.argv[0])) + sys.exit(1) + + with open('genotypes.py', 'a') as f: + _nr_random_genes = sys.argv[1] + for i in range(int(_nr_random_genes)): + gene = random_gene() + f.write('DARTS_%d = %s'%(i, gene)) + f.write('\n') + print(gene) diff --git a/autoPyTorch/components/networks/image/darts/model.py b/autoPyTorch/components/networks/image/darts/model.py new file mode 100644 index 000000000..2fa9d332f --- /dev/null +++ b/autoPyTorch/components/networks/image/darts/model.py @@ -0,0 +1,238 @@ +import torch +import torch.nn as nn +from .operations import * +from .utils import drop_path + + 
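+# The classes below consume a Genotype as defined in genotypes.py: for each
+# cell type ('normal' / 'reduce') it lists (op_name, input_index) pairs - two
+# incoming edges per intermediate node - plus the node outputs to concatenate.
+# As an illustrative sketch (taken from the hand-crafted DARTS genotype above),
+#   normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ...], normal_concat=[2, 3, 4, 5]
+# means intermediate node 2 applies sep_conv_3x3 to both cell inputs (indices 0
+# and 1) and sums the results; Cell._compile/forward below walk the pairs in
+# exactly this order and concatenate the states listed in *_concat.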
+class Cell(nn.Module): + + def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev): + super(Cell, self).__init__() + # print(C_prev_prev, C_prev, C) + + if reduction_prev: + self.preprocess0 = FactorizedReduce(C_prev_prev, C) + else: + self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0) + self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0) + + if reduction: + op_names, indices = zip(*genotype.reduce) + concat = genotype.reduce_concat + else: + op_names, indices = zip(*genotype.normal) + concat = genotype.normal_concat + self._compile(C, op_names, indices, concat, reduction) + + def _compile(self, C, op_names, indices, concat, reduction): + assert len(op_names) == len(indices) + self._steps = len(op_names) // 2 + self._concat = concat + self.multiplier = len(concat) + + self._ops = nn.ModuleList() + for name, index in zip(op_names, indices): + stride = 2 if reduction and index < 2 else 1 + op = OPS[name](C, stride, True) + self._ops += [op] + self._indices = indices + + def forward(self, s0, s1, drop_prob): + s0 = self.preprocess0(s0) + s1 = self.preprocess1(s1) + + states = [s0, s1] + for i in range(self._steps): + h1 = states[self._indices[2*i]] + h2 = states[self._indices[2*i+1]] + op1 = self._ops[2*i] + op2 = self._ops[2*i+1] + h1 = op1(h1) + h2 = op2(h2) + if self.training and drop_prob > 0.: + if not isinstance(op1, Identity): + h1 = drop_path(h1, drop_prob) + if not isinstance(op2, Identity): + h2 = drop_path(h2, drop_prob) + s = h1 + h2 + states += [s] + return torch.cat([states[i] for i in self._concat], dim=1) + + +class AuxiliaryHeadCIFAR(nn.Module): + + def __init__(self, C, num_classes): + """assuming input size 8x8""" + super(AuxiliaryHeadCIFAR, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), # image size = 2 x 2 + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + + +class AuxiliaryHeadImageNet(nn.Module): + + def __init__(self, C, num_classes): + """assuming input size 14x14""" + super(AuxiliaryHeadImageNet, self).__init__() + self.features = nn.Sequential( + nn.ReLU(inplace=True), + nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), + nn.Conv2d(C, 128, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True), + nn.Conv2d(128, 768, 2, bias=False), + # NOTE: This batchnorm was omitted in my earlier implementation due to a typo. + # Commenting it out for consistency with the experiments in the paper. 
+ # nn.BatchNorm2d(768), + nn.ReLU(inplace=True) + ) + self.classifier = nn.Linear(768, num_classes) + + def forward(self, x): + x = self.features(x) + x = self.classifier(x.view(x.size(0),-1)) + return x + + +from autoPyTorch.components.networks.base_net import BaseImageNet +class NetworkCIFAR(BaseImageNet): + + def __init__(self, C, num_classes, layers, auxiliary, genotype): + #super(NetworkCIFAR, self).__init__() + self._layers = layers + self._auxiliary = auxiliary + + stem_multiplier = 3 + C_curr = stem_multiplier*C + self.stem = nn.Sequential( + nn.Conv2d(3, C_curr, 3, padding=1, bias=False), + nn.BatchNorm2d(C_curr) + ) + + C_prev_prev, C_prev, C_curr = C_curr, C_curr, C + self.cells = nn.ModuleList() + reduction_prev = False + for i in range(layers): + if i in [layers//3, 2*layers//3]: + C_curr *= 2 + reduction = True + else: + reduction = False + cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev) + reduction_prev = reduction + self.cells += [cell] + C_prev_prev, C_prev = C_prev, cell.multiplier*C_curr + if i == 2*layers//3: + C_to_auxiliary = C_prev + + if auxiliary: + self.auxiliary_head = AuxiliaryHeadCIFAR(C_to_auxiliary, num_classes) + self.global_pooling = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Linear(C_prev, num_classes) + + def forward(self, input): + logits_aux = None + s0 = s1 = self.stem(input) + for i, cell in enumerate(self.cells): + s0, s1 = s1, cell(s0, s1, self.drop_path_prob) + if i == 2*self._layers//3: + if self._auxiliary and self.training: + logits_aux = self.auxiliary_head(s1) + out = self.global_pooling(s1) + logits = self.classifier(out.view(out.size(0),-1)) + return logits#, logits_aux + + + +class NetworkImageNet(BaseImageNet): + + def __init__(self, C, num_classes, layers, auxiliary, genotype): + # super(NetworkImageNet, self).__init__() + self._layers = layers + self._auxiliary = auxiliary + + self.stem0 = nn.Sequential( + nn.Conv2d(3, C // 2, kernel_size=3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C // 2), + nn.ReLU(inplace=True), + nn.Conv2d(C // 2, C, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C), + ) + + self.stem1 = nn.Sequential( + nn.ReLU(inplace=True), + nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False), + nn.BatchNorm2d(C), + ) + + C_prev_prev, C_prev, C_curr = C, C, C + + self.cells = nn.ModuleList() + reduction_prev = True + for i in range(layers): + if i in [layers // 3, 2 * layers // 3]: + C_curr *= 2 + reduction = True + else: + reduction = False + cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev) + reduction_prev = reduction + self.cells += [cell] + C_prev_prev, C_prev = C_prev, cell.multiplier * C_curr + if i == 2 * layers // 3: + C_to_auxiliary = C_prev + + if auxiliary: + self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes) + self.global_pooling = nn.AdaptiveAvgPool2d((1, 1)) + self.classifier = nn.Linear(C_prev, num_classes) + + def forward(self, input): + logits_aux = None + s0 = self.stem0(input) + s1 = self.stem1(s0) + for i, cell in enumerate(self.cells): + s0, s1 = s1, cell(s0, s1, self.drop_path_prob) + if i == 2 * self._layers // 3: + if self._auxiliary and self.training: + logits_aux = self.auxiliary_head(s1) + out = self.global_pooling(s1) + logits = self.classifier(out.view(out.size(0), -1)) + return logits#, logits_aux + + +from .genotypes import get_gene_from_config +from .darts_worker import DARTSWorker +class DARTSImageNet(NetworkCIFAR): # use cifar10 base as we train ImageNet mostly with 64x64 images + def 
__init__(self, config, in_features, out_features, final_activation, **kwargs): + super(NetworkCIFAR, self).__init__(config, in_features, out_features, final_activation) + + self.drop_path_prob = config['drop_path_prob'] + topology = {key: config[key] for key in config if ('edge' in key) or ('inputs_node' in key)} + genotype = get_gene_from_config(topology) + super(DARTSImageNet, self).__init__(config['init_channels'], out_features, config['layers'], config['auxiliary'], genotype) + + def forward(self, x): + x = super(DARTSImageNet, self).forward(x) + + if not self.training and self.final_activation is not None: + x = self.final_activation(x) + return x + + @staticmethod + def get_config_space(**kwargs): + return DARTSWorker.get_config_space() + diff --git a/autoPyTorch/components/networks/image/darts/operations.py b/autoPyTorch/components/networks/image/darts/operations.py new file mode 100644 index 000000000..b0c62c575 --- /dev/null +++ b/autoPyTorch/components/networks/image/darts/operations.py @@ -0,0 +1,105 @@ +import torch +import torch.nn as nn + +OPS = { + 'none' : lambda C, stride, affine: Zero(stride), + 'avg_pool_3x3' : lambda C, stride, affine: nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False), + 'max_pool_3x3' : lambda C, stride, affine: nn.MaxPool2d(3, stride=stride, padding=1), + 'skip_connect' : lambda C, stride, affine: Identity() if stride == 1 else FactorizedReduce(C, C, affine=affine), + 'sep_conv_3x3' : lambda C, stride, affine: SepConv(C, C, 3, stride, 1, affine=affine), + 'sep_conv_5x5' : lambda C, stride, affine: SepConv(C, C, 5, stride, 2, affine=affine), + 'sep_conv_7x7' : lambda C, stride, affine: SepConv(C, C, 7, stride, 3, affine=affine), + 'dil_conv_3x3' : lambda C, stride, affine: DilConv(C, C, 3, stride, 2, 2, affine=affine), + 'dil_conv_5x5' : lambda C, stride, affine: DilConv(C, C, 5, stride, 4, 2, affine=affine), + 'conv_7x1_1x7' : lambda C, stride, affine: nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C, C, (1,7), stride=(1, stride), padding=(0, 3), bias=False), + nn.Conv2d(C, C, (7,1), stride=(stride, 1), padding=(3, 0), bias=False), + nn.BatchNorm2d(C, affine=affine) + ), +} + +class ReLUConvBN(nn.Module): + + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super(ReLUConvBN, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False), + nn.BatchNorm2d(C_out, affine=affine) + ) + + def forward(self, x): + return self.op(x) + +class DilConv(nn.Module): + + def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True): + super(DilConv, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=C_in, bias=False), + nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=affine), + ) + + def forward(self, x): + return self.op(x) + + +class SepConv(nn.Module): + + def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True): + super(SepConv, self).__init__() + self.op = nn.Sequential( + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False), + nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False), + nn.BatchNorm2d(C_in, affine=affine), + nn.ReLU(inplace=False), + nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, 
bias=False), + nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False), + nn.BatchNorm2d(C_out, affine=affine), + ) + + def forward(self, x): + return self.op(x) + + +class Identity(nn.Module): + + def __init__(self): + super(Identity, self).__init__() + + def forward(self, x): + return x + + +class Zero(nn.Module): + + def __init__(self, stride): + super(Zero, self).__init__() + self.stride = stride + + def forward(self, x): + if self.stride == 1: + return x.mul(0.) + return x[:,:,::self.stride,::self.stride].mul(0.) + + +class FactorizedReduce(nn.Module): + + def __init__(self, C_in, C_out, affine=True): + super(FactorizedReduce, self).__init__() + assert C_out % 2 == 0 + self.relu = nn.ReLU(inplace=False) + self.conv_1 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.conv_2 = nn.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) + self.bn = nn.BatchNorm2d(C_out, affine=affine) + + def forward(self, x): + x = self.relu(x) + out = torch.cat([self.conv_1(x), self.conv_2(x[:,:,1:,1:])], dim=1) + out = self.bn(out) + return out + diff --git a/autoPyTorch/components/networks/image/darts/utils.py b/autoPyTorch/components/networks/image/darts/utils.py new file mode 100644 index 000000000..fd5081dbd --- /dev/null +++ b/autoPyTorch/components/networks/image/darts/utils.py @@ -0,0 +1,166 @@ +import os +import numpy as np +import torch +import shutil +import torchvision.transforms as transforms +from torch.autograd import Variable + + +class AvgrageMeter(object): + + def __init__(self): + self.reset() + + def reset(self): + self.avg = 0 + self.sum = 0 + self.cnt = 0 + + def update(self, val, n=1): + self.sum += val * n + self.cnt += n + self.avg = self.sum / self.cnt + + +def accuracy(output, target, topk=(1,)): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0/batch_size)) + return res + + +class Cutout(object): + def __init__(self, length): + self.length = length + + def __call__(self, img): + h, w = img.size(1), img.size(2) + mask = np.ones((h, w), np.float32) + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = np.clip(y - self.length // 2, 0, h) + y2 = np.clip(y + self.length // 2, 0, h) + x1 = np.clip(x - self.length // 2, 0, w) + x2 = np.clip(x + self.length // 2, 0, w) + + mask[y1: y2, x1: x2] = 0. 
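+        # The mask is now zero inside an (at most) length x length square centred
+        # on a random pixel and clipped at the image borders; expanding it to the
+        # image shape and multiplying below blanks that region (Cutout regularisation).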
+ mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + return img + + +def _data_transforms_cifar10(args): + CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124] + CIFAR_STD = [0.24703233, 0.24348505, 0.26158768] + + train_transform = transforms.Compose([ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + if args.cutout: + train_transform.transforms.append(Cutout(args.cutout_length)) + + valid_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(CIFAR_MEAN, CIFAR_STD), + ]) + return train_transform, valid_transform + + +def count_parameters_in_MB(model): + return np.sum(np.prod(v.size()) for v in model.parameters())/1e6 + + +def save_checkpoint(state, is_best, save): + filename = os.path.join(save, 'checkpoint.pth.tar') + torch.save(state, filename) + if is_best: + best_filename = os.path.join(save, 'model_best.pth.tar') + shutil.copyfile(filename, best_filename) + + +def save(model, model_path): + torch.save(model.state_dict(), model_path) + + +def load(model, model_path, genotype): + pretrained_dict = torch.load(model_path) + model_dict = model.state_dict() + + # keep only the weights for the specified genotype, + # and prune all the other weights from the MixedOps + #pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} + + edge_dict = {(0,2): 0, (0,3): 2, (0,4): 5, (0,5): 9, (1,2): 1, (1,3): 3, (1,4): 6, (1,5): 10, (2,3): 4, (2,4): 7, (3,4): 8, (2,5): 11, (3,5): 12, (4,5): 13} + + for layer in range(8): + first_number = layer + + for p in range(2): + if layer in [3, 6] and p == 0: + key = 'cells.{}.preprocess{}.conv_1.weight'.format(layer, p) + key = 'cells.{}.preprocess{}.conv_2.weight'.format(layer, p) + else: + key = 'cells.{}.preprocess{}.op.1.weight'.format(layer, p) + model_dict[key] = pretrained_dict[key] + + if layer in [2, 5]: + gene = genotype.reduce + else: + gene = genotype.normal + + for i in range(4): + for k in [2*i, 2*i + 1]: + op, j = gene[k] + second_number = edge_dict[(j, i + 2)] + if op == 'sep_conv_3x3': + third_number = 4 + for h in [1, 2, 5, 6]: + key_model = 'cells.{}._ops.{}.op.{}.weight'.format(layer, k, h) + key_pretrained = 'cells.{}._ops.{}._ops.{}.op.{}.weight'.format(first_number, second_number, third_number, h) + model_dict[key_model] = pretrained_dict[key_pretrained] + elif op == 'max_pool_3x3': + third_number = 1 + elif op == 'avg_pool_3x3': + third_number = 2 + + model.load_state_dict(model_dict) + + +def drop_path(x, drop_prob): + if drop_prob > 0.: + keep_prob = 1.-drop_prob + try: + mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)) + except: + mask = Variable(torch.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob)) + x.div_(keep_prob) + x.mul_(mask) + return x + + +def create_exp_dir(path, scripts_to_save=None): + import time, random + time.sleep(random.uniform(1, 2)) + if not os.path.exists(path): + os.mkdir(path) + print('Experiment dir : {}'.format(path)) + + if scripts_to_save is not None: + os.mkdir(os.path.join(path, 'scripts')) + for script in scripts_to_save: + dst_file = os.path.join(path, 'scripts', os.path.basename(script)) + shutil.copyfile(script, dst_file) + diff --git a/autoPyTorch/components/networks/image/densenet.py b/autoPyTorch/components/networks/image/densenet.py new file mode 100644 index 000000000..f0c406cb3 --- /dev/null +++ b/autoPyTorch/components/networks/image/densenet.py @@ -0,0 +1,171 @@ +import re +import 
torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.model_zoo as model_zoo +from collections import OrderedDict + +import ConfigSpace +from autoPyTorch.components.networks.base_net import BaseImageNet +from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter + +from autoPyTorch.components.networks.base_net import BaseImageNet + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, bias=False)), + self.drop_rate = drop_rate + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate) + self.add_module('denselayer%d' % (i + 1), layer) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features, pool_size): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=pool_size, stride=pool_size)) + + +class DenseNet(BaseImageNet): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. 
bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + """ + + def __init__(self, config, in_features, out_features, final_activation, *args, **kwargs): + + super(DenseNet, self).__init__(config, in_features, out_features, final_activation) + + growth_rate = config['growth_rate'] + block_config=[config['layer_in_block_%d' % (i+1)] for i in range(config['blocks'])] + num_init_features = 2 * growth_rate + bn_size = 4 + drop_rate = config['dropout'] if config['use_dropout'] else 0 + num_classes = self.n_classes + + image_size, min_image_size = min(self.iw, self.ih), 1 + + import math + division_steps = math.floor(math.log2(image_size) - math.log2(min_image_size) - 1e-5) + 1 + + if division_steps > len(block_config) + 1: + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(self.channels, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + division_steps -= 2 + else: + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(self.channels, num_init_features, kernel_size=3, stride=1, padding=1, bias=False)) + ])) + + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, + bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2, pool_size=2 if i > len(block_config) - division_steps else 1) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('last_norm', nn.BatchNorm2d(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. 
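+        # Conv weights use Kaiming-normal init (suited to ReLU activations),
+        # BatchNorm layers are initialised to the identity transform (weight 1,
+        # bias 0), and only the bias of the final linear layer is reset to 0.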
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x): + features = self.features(x) + out = F.relu(features, inplace=True) + out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1) + out = self.classifier(out) + + if not self.training and self.final_activation is not None: + out = self.final_activation(out) + return out + + @staticmethod + def get_config_space(growth_rate_range=(12, 40), nr_blocks=(3, 4), layer_range=([1, 12], [6, 24], [12, 64], [12, 64]), num_init_features=(32, 128), **kwargs): + + import ConfigSpace as CS + import ConfigSpace.hyperparameters as CSH + from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter + + cs = CS.ConfigurationSpace() + growth_rate_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'growth_rate', growth_rate_range) + cs.add_hyperparameter(growth_rate_hp) + # add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'bn_size', [2, 4]) + # add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'num_init_features', num_init_features, log=True) + # add_hyperparameter(cs, CSH.CategoricalHyperparameter, 'bottleneck', [True, False]) + + blocks_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'blocks', nr_blocks) + cs.add_hyperparameter(blocks_hp) + use_dropout = add_hyperparameter(cs, CSH.CategoricalHyperparameter, 'use_dropout', [True, False]) + dropout = add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'dropout', [0.0, 1.0]) + cs.add_condition(CS.EqualsCondition(dropout, use_dropout, True)) + + if type(nr_blocks[0]) == int: + min_blocks = nr_blocks[0] + max_blocks = nr_blocks[1] + else: + min_blocks = nr_blocks[0][0] + max_blocks = nr_blocks[0][1] + + for i in range(1, max_blocks+1): + layer_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'layer_in_block_%d' % i, layer_range[i-1]) + cs.add_hyperparameter(layer_hp) + + if i > min_blocks: + cs.add_condition(CS.GreaterThanCondition(layer_hp, blocks_hp, i-1)) + + return cs + + + diff --git a/autoPyTorch/components/networks/image/densenet_flexible.py b/autoPyTorch/components/networks/image/densenet_flexible.py new file mode 100644 index 000000000..6f8f13969 --- /dev/null +++ b/autoPyTorch/components/networks/image/densenet_flexible.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Implementation of a Dense Net for image data. 
+""" + +import torch +import torch.nn as nn +import math + +import ConfigSpace +from autoPyTorch.components.networks.base_net import BaseImageNet +from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter + +import inspect +from autoPyTorch.components.networks.base_net import BaseImageNet +from autoPyTorch.utils.modules import Reshape +from autoPyTorch.components.networks.activations import all_activations, get_activation +from .utils.utils import get_layer_params + +# https://github.com/liuzhuang13/DenseNet + +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import logging +logger = logging.getLogger('autonet') + +class PrintNode(nn.Module): + def __init__(self, msg): + super(PrintNode, self).__init__() + self.msg = msg + + def forward(self, x): + logger.debug(self.msg) + return x + + +class _DenseLayer(nn.Sequential): + def __init__(self, nChannels, growth_rate, drop_rate, bottleneck, kernel_size, activation): + super(_DenseLayer, self).__init__() + # self.add_module('p_layer1', PrintNode("layer1")) + self.add_module('norm1', nn.BatchNorm2d(nChannels)) + self.add_module('relu1', get_activation(activation, inplace=True)) + if bottleneck: + self.add_module('conv1', nn.Conv2d(nChannels, 4 * growth_rate, kernel_size=1, stride=1, bias=False)) + nChannels = 4 * growth_rate + if drop_rate > 0: + self.add_module('drop', nn.Dropout2d(p=drop_rate, inplace=True)) + # self.add_module('p_layer2', PrintNode("layer2")) + self.add_module('norm2', nn.BatchNorm2d(nChannels)) + self.add_module('relu2', get_activation(activation, inplace=True)) + self.add_module('conv2', nn.Conv2d(nChannels, growth_rate, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)/2), bias=False)) + if drop_rate > 0: + self.add_module('drop', nn.Dropout2d(p=drop_rate, inplace=True)) + + def forward(self, x): + new_features = super(_DenseLayer, self).forward(x) + # logger.debug('concat ' + str(x.shape) + ' and ' + str(new_features.shape)) + return torch.cat([x, new_features], 1) + + +class _DenseBlock(nn.Sequential): + def __init__(self, N, nChannels, growth_rate, drop_rate, bottleneck, kernel_size, activation): + super(_DenseBlock, self).__init__() + for i in range(N): + self.add_module('denselayer%d' % (i + 1), _DenseLayer(nChannels, growth_rate, drop_rate, bottleneck, kernel_size, activation)) + nChannels += growth_rate + + + +class _Transition(nn.Sequential): + def __init__(self, nChannels, nOutChannels, drop_rate, last, pool_size, kernel_size, stride, padding, activation): + super(_Transition, self).__init__() + # self.add_module('p_transition', PrintNode("transition")) + self.add_module('norm', nn.BatchNorm2d(nChannels)) + self.add_module('relu', get_activation(activation, inplace=True)) + # self.add_module('p_last', PrintNode("last transition " + str(last))) + if last: + self.add_module('pool', nn.AvgPool2d(kernel_size=pool_size, stride=pool_size)) + self.add_module('reshape', Reshape(nChannels)) + else: + self.add_module('conv', nn.Conv2d(nChannels, nOutChannels, kernel_size=1, stride=1, bias=False)) + if drop_rate > 0: + self.add_module('drop', nn.Dropout2d(p=drop_rate, inplace=True)) + self.add_module('pool', nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding)) + + +class DenseNetFlexible(BaseImageNet): + + def __init__(self, config, in_features, out_features, final_activation, *args, **kwargs): + + super(DenseNetFlexible, self).__init__(config, in_features, out_features, final_activation) + + growth_rate=config['growth_rate'] + 
bottleneck=config['bottleneck'] + channel_reduction=config['channel_reduction'] + + in_size = min(self.iw, self.ih) + out_size = max(1, in_size * config['last_image_size']) + size_reduction = math.pow(in_size / out_size, 1 / (config['blocks'] + 1)) + + nChannels= 2 * growth_rate + + self.features = nn.Sequential() + + sizes = [max(1, round(in_size / math.pow(size_reduction, i+1))) for i in range(config['blocks'] + 2)] + + in_size, kernel_size, stride, padding = get_layer_params(in_size, sizes[0], config['first_conv_kernel']) + self.features.add_module('conv0', nn.Conv2d(self.channels, nChannels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False)) + self.features.add_module('norm0', nn.BatchNorm2d(nChannels)) + self.features.add_module('activ0', get_activation(config['first_activation'], inplace=True)) + + in_size, kernel_size, stride, padding = get_layer_params(in_size, sizes[1], config['first_pool_kernel']) + self.features.add_module('pool0', nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)) + # print(in_size) + + nOutChannels = nChannels + # Each denseblock + for i in range(1, config['blocks']+1): + nChannels = nOutChannels + + drop_rate = config['dropout_%d' % i] if config['use_dropout'] else 0 + + block = _DenseBlock(N=config['layer_in_block_%d' % i], nChannels=nChannels, bottleneck=bottleneck, + growth_rate=growth_rate, drop_rate=drop_rate, kernel_size=config['conv_kernel_%d' % i], + activation=config['activation_%d' % i]) + + self.features.add_module('denseblock%d' % i, block) + nChannels = nChannels + config['layer_in_block_%d' % i] * growth_rate + nOutChannels = max(1, math.floor(nChannels * channel_reduction)) + + out_size, kernel_size, stride, padding = get_layer_params(in_size, sizes[i+1], config['pool_kernel_%d' % i]) + transition = _Transition( nChannels=nChannels, nOutChannels=nOutChannels, + drop_rate=drop_rate, last=(i == config['blocks']), + pool_size=in_size, # only used in last transition -> reduce to '1x1 image' + kernel_size=kernel_size, stride=stride, padding=padding, + activation=config['activation_%d' % i]) + in_size = out_size + + self.features.add_module('trans%d' % i, transition) + + # Linear layer + self.classifier = nn.Linear(nChannels, out_features) + + # Official init from torch repo. 
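+        # Unlike the fixed scheme in densenet.py, the weight init here is itself
+        # searched: 'conv_init', 'batchnorm_weight_init', 'batchnorm_bias_init'
+        # and 'linear_bias_init' come from the config and are dispatched by
+        # matrix_init() below ('random' keeps PyTorch's default initialisation).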
+ for m in self.modules(): + if isinstance(m, nn.Conv2d): + self.matrix_init(m.weight, config['conv_init']) + elif isinstance(m, nn.BatchNorm2d): + self.matrix_init(m.weight, config['batchnorm_weight_init']) + self.matrix_init(m.bias, config['batchnorm_bias_init']) + elif isinstance(m, nn.Linear): + self.matrix_init(m.bias, config['linear_bias_init']) + + # logger.debug(print(self)) + + def matrix_init(self, matrix, init_type): + if init_type == 'kaiming_normal': + nn.init.kaiming_normal_(matrix) + elif init_type == 'constant_0': + nn.init.constant_(matrix, 0) + elif init_type == 'constant_1': + nn.init.constant_(matrix, 1) + elif init_type == 'constant_05': + nn.init.constant_(matrix, 0.5) + elif init_type == 'random': + return + else: + raise ValueError('Init type ' + init_type + ' is not supported') + + + def forward(self, x): + out = self.features(x) + out = self.classifier(out) + if not self.training and self.final_activation is not None: + out = self.final_activation(out) + return out + + @staticmethod + def get_config_space( growth_rate_range=(5, 128), nr_blocks=(1, 5), kernel_range=(2, 7), + layer_range=(5, 50), activations=all_activations.keys(), + conv_init=('random', 'kaiming_normal', 'constant_0', 'constant_1', 'constant_05'), + batchnorm_weight_init=('random', 'constant_0', 'constant_1', 'constant_05'), + batchnorm_bias_init=('random', 'constant_0', 'constant_1', 'constant_05'), + linear_bias_init=('random', 'constant_0', 'constant_1', 'constant_05'), **kwargs): + + import ConfigSpace as CS + import ConfigSpace.hyperparameters as CSH + from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter + + cs = CS.ConfigurationSpace() + growth_rate_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'growth_rate', growth_rate_range) + first_conv_kernel_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'first_conv_kernel', kernel_range) + first_pool_kernel_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'first_pool_kernel', kernel_range) + conv_init_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'conv_init', conv_init) + batchnorm_weight_init_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'batchnorm_weight_init', batchnorm_weight_init) + batchnorm_bias_init_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'batchnorm_bias_init', batchnorm_bias_init) + linear_bias_init_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'linear_bias_init', linear_bias_init) + first_activation_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'first_activation', set(activations).intersection(all_activations)) + blocks_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'blocks', nr_blocks) + + cs.add_hyperparameter(growth_rate_hp) + cs.add_hyperparameter(first_conv_kernel_hp) + cs.add_hyperparameter(first_pool_kernel_hp) + cs.add_hyperparameter(conv_init_hp) + cs.add_hyperparameter(batchnorm_weight_init_hp) + cs.add_hyperparameter(batchnorm_bias_init_hp) + cs.add_hyperparameter(linear_bias_init_hp) + cs.add_hyperparameter(first_activation_hp) + cs.add_hyperparameter(blocks_hp) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'channel_reduction', [0.1, 0.9]) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'last_image_size', [0, 1]) + add_hyperparameter(cs, CSH.CategoricalHyperparameter, 'bottleneck', [True, False]) + use_dropout = add_hyperparameter(cs, CSH.CategoricalHyperparameter, 'use_dropout', [True, False]) + + if type(nr_blocks[0]) == 
int: + min_blocks = nr_blocks[0] + max_blocks = nr_blocks[1] + else: + min_blocks = nr_blocks[0][0] + max_blocks = nr_blocks[0][1] + + for i in range(1, max_blocks+1): + layer_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'layer_in_block_%d' % i, layer_range) + pool_kernel_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'pool_kernel_%d' % i, kernel_range) + activation_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'activation_%d' % i, set(activations).intersection(all_activations)) + cs.add_hyperparameter(layer_hp) + cs.add_hyperparameter(pool_kernel_hp) + cs.add_hyperparameter(activation_hp) + dropout = add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'dropout_%d' % i, [0.0, 1.0]) + conv_kernel = add_hyperparameter(cs, CSH.CategoricalHyperparameter, 'conv_kernel_%d' % i, [3, 5, 7]) + + + if i > min_blocks: + cs.add_condition(CS.GreaterThanCondition(layer_hp, blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(conv_kernel, blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(pool_kernel_hp, blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(activation_hp, blocks_hp, i-1)) + cs.add_condition(CS.AndConjunction(CS.EqualsCondition(dropout, use_dropout, True), CS.GreaterThanCondition(dropout, blocks_hp, i-1))) + else: + cs.add_condition(CS.EqualsCondition(dropout, use_dropout, True)) + + return cs diff --git a/autoPyTorch/components/networks/image/mobilenet.py b/autoPyTorch/components/networks/image/mobilenet.py new file mode 100644 index 000000000..a2190b1a3 --- /dev/null +++ b/autoPyTorch/components/networks/image/mobilenet.py @@ -0,0 +1,258 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import math + +import ConfigSpace +from autoPyTorch.components.networks.base_net import BaseImageNet +from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter + +from torch.autograd import Variable +from autoPyTorch.components.networks.base_net import BaseImageNet + +from .utils.mobilenet_utils import GenEfficientNet, _decode_arch_def, _resolve_bn_args, _round_channels, swish, sigmoid, hard_swish, hard_sigmoid, SelectAdaptivePool2d + +# TODO +# EXPANSION RATIO (currenty hardcoded) +# ACTIVATION? (currently swish) + +class Arch_Encoder(): + """ Encode block definition string + Encodes a list of config space (dicts) through a string notation of arguments for further usage with _decode_architecure and timm. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block hyperpar dict as coming from MobileNet class + Returns: + Architecture encoded as string for further usage with _decode_architecure and timm. 
+ """ + + def __init__(self, block_types, nr_sub_blocks, kernel_sizes, strides, output_filters, se_ratios, skip_connections, expansion_rates=0): + self.block_types = block_types + self.nr_sub_blocks = nr_sub_blocks + self.kernel_sizes = kernel_sizes + self.strides = strides + self.expansion_rates = expansion_rates + self.output_filters = output_filters + self.se_ratios = se_ratios + self.skip_connections = skip_connections + + self.arch_encoded = [[""] for ind in range(len(self.block_types))] + self._encode_architecture() + + def _encode_architecture(self): + encoding_functions = [self._get_encoded_blocks, self._get_encoded_nr_sub_bocks, self._get_encoded_kernel_sizes, self._get_encoded_strides, + self._get_encoded_expansion_rates , self._get_encoded_output_filters, self._get_encoded_se_ratios, self._get_encoded_skip_connections] + + for func in encoding_functions: + return_val = func() + self._add_specifications(return_val) + + def _add_specifications(self, arguments): + for ind, arg in enumerate(arguments): + if len(self.arch_encoded[ind][0])!=0 and arg!="" and not self.arch_encoded[ind][0].endswith("_") : + self.arch_encoded[ind][0] = self.arch_encoded[ind][0] + "_" + self.arch_encoded[ind][0] = self.arch_encoded[ind][0] + arg + + def _get_encoded_blocks(self): + block_type_dict = {"inverted_residual":"ir", "dwise_sep_conv":"ds", "conv_bn_act":"cn"} + block_type_list = self._dict_to_list(self.block_types) + return [block_type_dict[item] for item in block_type_list] + + def _get_encoded_nr_sub_bocks(self): + nr_sub_blocks_dict = dict([(i, "r"+str(i)) for i in range(10)]) + nr_sub_blocks_list = self._dict_to_list(self.nr_sub_blocks) + return [nr_sub_blocks_dict[item] for item in nr_sub_blocks_list] + + def _get_encoded_kernel_sizes(self): + kernel_sizes_dict = dict([(i, "k"+str(i)) for i in range(10)]) + kernel_sizes_list = self._dict_to_list(self.kernel_sizes) + return [kernel_sizes_dict[item] for item in kernel_sizes_list] + + def _get_encoded_strides(self): + strides_dict = dict([(i, "s"+str(i)) for i in range(10)]) + strides_list = self._dict_to_list(self.strides) + return [strides_dict[item] for item in strides_list] + + def _get_encoded_expansion_rates(self): + if self.expansion_rates == 0: + exp_list = ["e1","e6","e6","e6","e6","e6","e6"] + return exp_list[0:len(self.block_types)] + else: + expansion_rates_dict = dict([(i, "e"+str(i)) for i in range(10)]) + expansion_rates_list = self._dict_to_list(self.expansion_rates) + return [expansion_rates_dict[item] for item in expansion_rates_list] + + def _get_encoded_output_filters(self): + output_filters_dict = dict([(i, "c"+str(i)) for i in range(5000)]) + output_filters_list = self._dict_to_list(self.output_filters) + return [output_filters_dict[item] for item in output_filters_list] + + def _get_encoded_se_ratios(self): + se_ratios_dict = {0:"", 0.25:"se0.25"} + se_ratios_list = self._dict_to_list(self.se_ratios) + return [se_ratios_dict[item] for item in se_ratios_list] + + def _get_encoded_skip_connections(self): + skip_connections_dict = {True : "", False: "no_skip"} + skip_connections_list = self._dict_to_list(self.skip_connections) + return [skip_connections_dict[item] for item in skip_connections_list] + + def _dict_to_list(self, input_dict): + output_list = [] + dict_len = len(input_dict) + for ind in range(dict_len): + output_list.append(input_dict["Group_" + str(ind+1)]) + return output_list + + def get_encoded_architecture(self): + return self.arch_encoded + + +class MobileNet(BaseImageNet): + """ + Implements a search space 
as in MnasNet (https://arxiv.org/abs/1807.11626) using inverted residuals. + """ + def __init__(self, config, in_features, out_features, final_activation, **kwargs): + super(MobileNet, self).__init__(config, in_features, out_features, final_activation) + + # Initialize hyperpars for architecture + nn.Module.config = config + self.final_activation = final_activation + self.nr_main_blocks = config['nr_main_blocks'] + self.initial_filters = config['initial_filters'] + + + self.nr_sub_blocks = dict([ + ('Group_%d' % (i+1), config['nr_sub_blocks_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.op_types = dict([ + ('Group_%d' % (i+1), config['op_type_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.kernel_sizes = dict([ + ('Group_%d' % (i+1), config['kernel_size_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.strides = dict([ + ('Group_%d' % (i+1), config['stride_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.output_filters = dict([ + ('Group_%d' % (i+1), config['out_filters_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.skip_cons = dict([ + ('Group_%d' % (i+1), config['skip_con_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.se_ratios = dict([ + ('Group_%d' % (i+1), config['se_ratio_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + + ########## Create model + encoder = Arch_Encoder(block_types=self.op_types, + nr_sub_blocks=self.nr_sub_blocks, + kernel_sizes=self.kernel_sizes, + strides=self.strides, + expansion_rates=0, + output_filters=self.output_filters, + se_ratios=self.se_ratios, + skip_connections=self.skip_cons) + arch_enc = encoder.get_encoded_architecture() + + kwargs["bn_momentum"] = 0.01 + + self.model = GenEfficientNet(_decode_arch_def(arch_enc, depth_multiplier=1.0), + num_classes=out_features, + stem_size=self.initial_filters, + channel_multiplier=1.0, + num_features=_round_channels(1280, 1.0, 8, None), + bn_args=_resolve_bn_args(kwargs), + act_fn=swish, + drop_connect_rate=0.2, + drop_rate=0.2, + **kwargs) + + def _cfg(url='', **kwargs): + return {'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), + 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs} + + self.model.default_cfg = _cfg(url='', input_size=in_features, pool_size=(10, 10), crop_pct=0.904, num_classes=out_features) + + def forward(self, x): + # make sure channels first + x = self.model(x) + if not self.training and self.final_activation is not None: + x = self.final_activation(x) + return x + + @staticmethod + def get_config_space( nr_main_blocks=[3, 7], initial_filters=([8, 32], True), nr_sub_blocks=([1, 4], False), + op_types = ["inverted_residual", "dwise_sep_conv"], kernel_sizes=[3, 5], strides=[1,2], + output_filters = [[12, 16, 20], + [18, 24, 30], + [24, 32, 40], + [48, 64, 80], + [72, 96, 120], + [120, 160, 200], + [240, 320, 400]], # the idea is to search for e.g. 
0.75, 1, 1.25* output_filters(mainblock number) + skip_connection = [True, False], se_ratios = [0, 0.25], **kwargs): + + import ConfigSpace as CS + import ConfigSpace.hyperparameters as CSH + + cs = CS.ConfigurationSpace() + + + main_blocks_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, "nr_main_blocks", nr_main_blocks) + initial_filters_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, "initial_filters", initial_filters) + cs.add_hyperparameter(main_blocks_hp) + cs.add_hyperparameter(initial_filters_hp) + + if type(nr_main_blocks[0]) == int: + min_blocks = nr_main_blocks[0] + max_blocks = nr_main_blocks[1] + else: + min_blocks = nr_main_blocks[0][0] + max_blocks = nr_main_blocks[0][1] + + for i in range(1, max_blocks + 1): + sub_blocks_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'nr_sub_blocks_%d' % i, nr_sub_blocks) + op_type_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'op_type_%d' % i, op_types) + kernel_size_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'kernel_size_%d' % i, kernel_sizes) + stride_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'stride_%d' % i, strides) + out_filters_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'out_filters_%d' % i, output_filters[i-1]) # take output_filters list i-1 as options + se_ratio_hp = get_hyperparameter(ConfigSpace.CategoricalHyperparameter, 'se_ratio_%d' % i, se_ratios) + cs.add_hyperparameter(sub_blocks_hp) + cs.add_hyperparameter(op_type_hp) + cs.add_hyperparameter(kernel_size_hp) + cs.add_hyperparameter(stride_hp) + cs.add_hyperparameter(out_filters_hp) + cs.add_hyperparameter(se_ratio_hp) + skip_con = cs.add_hyperparameter(CSH.CategoricalHyperparameter('skip_con_%d' % i, [True, False])) + + if i > min_blocks: + cs.add_condition(CS.GreaterThanCondition(sub_blocks_hp, main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(op_type_hp, main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(kernel_size_hp, main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(stride_hp, main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(out_filters_hp, main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(skip_con, main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(se_ratio_hp, main_blocks_hp, i-1)) + + return cs diff --git a/autoPyTorch/components/networks/image/resnet.py b/autoPyTorch/components/networks/image/resnet.py new file mode 100644 index 000000000..27981911b --- /dev/null +++ b/autoPyTorch/components/networks/image/resnet.py @@ -0,0 +1,292 @@ +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable + +import ConfigSpace +from autoPyTorch.components.networks.base_net import BaseImageNet +from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter + +from autoPyTorch.components.networks.image.utils.utils import initialize_weights +from autoPyTorch.components.networks.image.utils.shakeshakeblock import shake_shake, generate_alpha_beta +from autoPyTorch.components.networks.image.utils.shakedrop import shake_drop, generate_alpha_beta_single + +class SkipConnection(nn.Module): + def __init__(self, in_channels, out_channels, stride): + super(SkipConnection, self).__init__() + + self.s1 = nn.Sequential() + self.s1.add_module('Skip_1_AvgPool', + nn.AvgPool2d(1, stride=stride)) + self.s1.add_module('Skip_1_Conv', + nn.Conv2d(in_channels, + int(out_channels / 2), + 
kernel_size=1, + stride=1, + padding=0, + bias=False)) + + self.s2 = nn.Sequential() + self.s2.add_module('Skip_2_AvgPool', + nn.AvgPool2d(1, stride=stride)) + self.s2.add_module('Skip_2_Conv', + nn.Conv2d(in_channels, + int(out_channels / 2) if out_channels % 2 == 0 else int(out_channels / 2) + 1, + kernel_size=1, + stride=1, + padding=0, + bias=False)) + + self.batch_norm = nn.BatchNorm2d(out_channels) + + def forward(self, x): + out1 = F.relu(x, inplace=False) + out1 = self.s1(out1) + + out2 = F.pad(x[:, :, 1:, 1:], (0, 1, 0, 1)) + out2 = self.s2(out2) + + out = torch.cat([out1, out2], dim=1) + out = self.batch_norm(out) + + return out + + +class ResidualBranch(nn.Module): + def __init__(self, in_channels, out_channels, filter_size, stride, branch_index): + super(ResidualBranch, self).__init__() + + self.residual_branch = nn.Sequential() + + self.residual_branch.add_module('Branch_{}:ReLU_1'.format(branch_index), + nn.ReLU(inplace=False)) + self.residual_branch.add_module('Branch_{}:Conv_1'.format(branch_index), + nn.Conv2d(in_channels, + out_channels, + kernel_size=filter_size, + stride=stride, + padding=round(filter_size / 3), + bias=False)) + self.residual_branch.add_module('Branch_{}:BN_1'.format(branch_index), + nn.BatchNorm2d(out_channels)) + self.residual_branch.add_module('Branch_{}:ReLU_2'.format(branch_index), + nn.ReLU(inplace=False)) + self.residual_branch.add_module('Branch_{}:Conv_2'.format(branch_index), + nn.Conv2d(out_channels, + out_channels, + kernel_size=filter_size, + stride=1, + padding=round(filter_size / 3), + bias=False)) + self.residual_branch.add_module('Branch_{}:BN_2'.format(branch_index), + nn.BatchNorm2d(out_channels)) + + def forward(self, x): + return self.residual_branch(x) + + +class BasicBlock(nn.Module): + def __init__(self, n_input_plane, n_output_plane, filter_size, res_branches, stride, shake_config): + super(BasicBlock, self).__init__() + + self.shake_config = shake_config + self.branches = nn.ModuleList([ResidualBranch(n_input_plane, n_output_plane, filter_size, stride, branch + 1) for branch in range(res_branches)]) + + # Skip connection + self.skip = nn.Sequential() + if n_input_plane != n_output_plane or stride != 1: + self.skip.add_module('Skip_connection', + SkipConnection(n_input_plane, n_output_plane, stride)) + + + def forward(self, x): + if len(self.branches) == 1: + out = self.branches[0](x) + if self.config.apply_shakeDrop: + alpha, beta = generate_alpha_beta_single(out.size(), self.shake_config if self.training else (False, False, False), x.is_cuda) + out = shake_drop(out, alpha, beta, self.config.death_rate, self.training) + else: + if self.config.apply_shakeShake: + alpha, beta = generate_alpha_beta(len(self.branches), x.size(0), self.shake_config if self.training else (False, False, False), x.is_cuda) + branches = [self.branches[i](x) for i in range(len(self.branches))] + out = shake_shake(alpha, beta, *branches) + else: + out = sum([self.branches[i](x) for i in range(len(self.branches))]) + + return out + self.skip(x) + + +class ResidualGroup(nn.Module): + def __init__(self, block, n_input_plane, n_output_plane, n_blocks, filter_size, res_branches, stride, shake_config): + super(ResidualGroup, self).__init__() + self.group = nn.Sequential() + self.n_blocks = n_blocks + + # The first residual block in each group is responsible for the input downsampling + self.group.add_module('Block_1', + block(n_input_plane, + n_output_plane, + filter_size, + res_branches, + stride=stride, + shake_config=shake_config)) + + # The following 
residual block do not perform any downsampling (stride=1) + for block_index in range(2, n_blocks + 1): + block_name = 'Block_{}'.format(block_index) + self.group.add_module(block_name, + block(n_output_plane, + n_output_plane, + filter_size, + res_branches, + stride=1, + shake_config=shake_config)) + + def forward(self, x): + return self.group(x) + + +class ResNet(BaseImageNet): + def __init__(self, config, in_features, out_features, final_activation, **kwargs): + super(ResNet, self).__init__(config, in_features, out_features, final_activation) + + nn.Module.config = config + self.final_activation = final_activation + self.nr_main_blocks = config['nr_main_blocks'] + config.initial_filters = config['initial_filters'] + config.death_rate = config['death_rate'] + + config.forward_shake = True + config.backward_shake = True + config.shake_image = True + config.apply_shakeDrop = True + config.apply_shakeShake = True + + self.nr_residual_blocks = dict([ + ('Group_%d' % (i+1), config['nr_residual_blocks_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.widen_factors = dict([ + ('Group_%d' % (i+1), config['widen_factor_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.res_branches = dict([ + ('Group_%d' % (i+1), config['res_branches_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + self.filters_size = dict([ + ('Group_%d' % (i+1), 3) #config['filters_size_%i' % (i+1)]) + for i in range(self.nr_main_blocks)]) + + shake_config = (config.forward_shake, config.backward_shake, + config.shake_image) + + ########## + self.model = nn.Sequential() + + # depth = sum([config.nr_convs * self.nr_residual_blocks['Group_{}'.format(i)] + 2 for i in range(1, self.nr_main_blocks + 1)]) + # print(' | Multi-branch ResNet-' + str(depth) + ' CIFAR-10') + + block = BasicBlock + + im_size = max(self.ih, self.iw) + + self.model.add_module('Conv_0', + nn.Conv2d(self.channels, + config.initial_filters, + kernel_size=7 if im_size > 200 else 3, + stride=2 if im_size > 200 else 1, + padding=3 if im_size > 200 else 1, + bias=False)) + self.model.add_module('BN_0', + nn.BatchNorm2d(config.initial_filters)) + + if im_size > 200: + self.model.add_module('ReLU_0', nn.ReLU(inplace=True)) + self.model.add_module('Pool_0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + + feature_maps_in = int(round(config.initial_filters * self.widen_factors['Group_1'])) + self.model.add_module('Group_1', + ResidualGroup(block, + config.initial_filters, + feature_maps_in, + self.nr_residual_blocks['Group_1'], + self.filters_size['Group_1'], + self.res_branches['Group_1'], + 1, #2 if im_size > 100 else 1, + shake_config)) + + # image_size, min_image_size = min(self.iw, self.ih), 5 + # division_steps = math.floor(math.log2(image_size) - math.log2(min_image_size) - 1e-5) + + for main_block_nr in range(2, self.nr_main_blocks + 1): + feature_maps_out = int(round(feature_maps_in * self.widen_factors['Group_{}'.format(main_block_nr)])) + self.model.add_module('Group_{}'.format(main_block_nr), + ResidualGroup(block, + feature_maps_in, + feature_maps_out, + self.nr_residual_blocks['Group_{}'.format(main_block_nr)], + self.filters_size['Group_{}'.format(main_block_nr)], + self.res_branches['Group_{}'.format(main_block_nr)], + 2, # if main_block_nr > self.nr_main_blocks - division_steps else 1, + shake_config)) + + #image_size = math.floor((image_size+1)/2.0) if main_block_nr > self.nr_main_blocks - division_steps else image_size + feature_maps_in = feature_maps_out + + self.feature_maps_out = feature_maps_in + 
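+        # Classification head: a final ReLU, global average pooling down to one
+        # value per feature map, and a linear layer from feature_maps_out to
+        # out_features; forward() flattens the pooled tensor before self.fc.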
self.model.add_module('ReLU_0', nn.ReLU(inplace=True)) + self.model.add_module('AveragePool', nn.AdaptiveAvgPool2d(1)) + self.fc = nn.Linear(self.feature_maps_out, out_features) + + self.apply(initialize_weights) + + def forward(self, x): + x = self.model(x) + x = x.view(-1, self.feature_maps_out) + x = self.fc(x) + if not self.training and self.final_activation is not None: + x = self.final_activation(x) + return x + + @staticmethod + def get_config_space( nr_main_blocks=[1, 8], nr_residual_blocks=([1, 16], True), initial_filters=([8, 32], True), widen_factor=([0.5, 4], True), + res_branches=([1, 5], False), filters_size=[3, 3], **kwargs): + + import ConfigSpace as CS + import ConfigSpace.hyperparameters as CSH + + cs = CS.ConfigurationSpace() + + nr_main_blocks_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, "nr_main_blocks", nr_main_blocks) + cs.add_hyperparameter(nr_main_blocks_hp) + initial_filters_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, "initial_filters", initial_filters) + cs.add_hyperparameter(initial_filters_hp) + # add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'nr_convs', nr_convs, log=True) + death_rate_hp = get_hyperparameter(ConfigSpace.UniformFloatHyperparameter, "death_rate", ([0,1], False)) + cs.add_hyperparameter(death_rate_hp) + + if type(nr_main_blocks[0]) is int: + main_blocks_min = nr_main_blocks[0] + main_blocks_max = nr_main_blocks[1] + else: + main_blocks_min = nr_main_blocks[0][0] + main_blocks_max = nr_main_blocks[0][1] + + for i in range(1, main_blocks_max + 1): + blocks_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'nr_residual_blocks_%d' % i, nr_residual_blocks) + blocks = cs.add_hyperparameter(blocks_hp) + widen_hp = get_hyperparameter(ConfigSpace.UniformFloatHyperparameter, 'widen_factor_%d' % i, widen_factor) + widen = cs.add_hyperparameter(widen_hp) + branches_hp = get_hyperparameter(ConfigSpace.UniformIntegerHyperparameter, 'res_branches_%d' % i, res_branches) + branches = cs.add_hyperparameter(branches_hp) + # filters = add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'filters_size_%d' % i, filters_size, log=False) + + if i > main_blocks_min: + cs.add_condition(CS.GreaterThanCondition(blocks_hp, nr_main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(widen_hp, nr_main_blocks_hp, i-1)) + cs.add_condition(CS.GreaterThanCondition(branches_hp, nr_main_blocks_hp, i-1)) + # cs.add_condition(CS.GreaterThanCondition(filters, main_blocks, i-1)) + + return cs diff --git a/autoPyTorch/components/networks/image/resnet152.py b/autoPyTorch/components/networks/image/resnet152.py new file mode 100644 index 000000000..68748ad75 --- /dev/null +++ b/autoPyTorch/components/networks/image/resnet152.py @@ -0,0 +1,190 @@ + +import torch.nn as nn +import torch.utils.model_zoo as model_zoo + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +def conv1x1(in_planes, 
out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +from autoPyTorch.components.networks.base_net import BaseImageNet +class ResNet(BaseImageNet): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False): + self.inplanes = 64 + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class ResNet152(ResNet): + def __init__(self, config, in_features, out_features, final_activation, **kwargs): + super(ResNet, self).__init__(config, in_features, out_features, final_activation) + super(ResNet152, self).__init__(Bottleneck, [3, 8, 36, 3], num_classes=out_features) + + + def forward(self, x): + x = super(ResNet152, self).forward(x) + + if not self.training and self.final_activation is not None: + x = self.final_activation(x) + return x + + +# def resnet152(pretrained=False, **kwargs): +# """Constructs a ResNet-152 model. +# Args: +# pretrained (bool): If True, returns a model pre-trained on ImageNet +# """ +# model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) +# if pretrained: +# model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) +# return model diff --git a/autoPyTorch/components/networks/image/utils/__init__.py b/autoPyTorch/components/networks/image/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/components/networks/image/utils/conv2d_helpers.py b/autoPyTorch/components/networks/image/utils/conv2d_helpers.py new file mode 100644 index 000000000..75801c374 --- /dev/null +++ b/autoPyTorch/components/networks/image/utils/conv2d_helpers.py @@ -0,0 +1,135 @@ +# Copyright 2019 Ross Wightman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + + +def _is_static_pad(kernel_size, stride=1, dilation=1, **_): + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +def _get_padding(kernel_size, stride=1, dilation=1, **_): + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +def _calc_same_pad(i, k, s, d): + return max((math.ceil(i / s) - 1) * s + (k - 1) * d + 1 - i, 0) + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + def __init__(self, in_channels, out_channels, kernel_size, stride=1, + padding=0, dilation=1, groups=1, bias=True): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, stride, 0, dilation, + groups, bias) + + def forward(self, x): + ih, iw = x.size()[-2:] + kh, kw = self.weight.size()[-2:] + pad_h = _calc_same_pad(ih, kh, self.stride[0], self.dilation[0]) + pad_w = _calc_same_pad(iw, kw, self.stride[1], self.dilation[1]) + if pad_h > 0 or pad_w > 0: + x = F.pad(x, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2]) + return F.conv2d(x, self.weight, self.bias, self.stride, + self.padding, self.dilation, self.groups) + + +def conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if _is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = _get_padding(kernel_size, **kwargs) + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + else: + # dynamic padding + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=0, **kwargs) + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = _get_padding(kernel_size, **kwargs) + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + else: + # padding was specified as a number or pair + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + +class MixedConv2d(nn.Module): + """ Mixed Grouped Convolution + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilated=False, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + d = 1 + # FIXME make compat with non-square kernel/dilations/strides + if stride == 1 and dilated: + d, k = (k - 1) // 2, 3 + conv_groups = out_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + str(idx), + 
conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=d, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x) for x, c in zip(x_split, self._modules.values())] + x = torch.cat(x_out, 1) + return x + + +# helper method +def select_conv2d(in_chs, out_chs, kernel_size, **kwargs): + assert 'groups' not in kwargs # only use 'depthwise' bool arg + if isinstance(kernel_size, list): + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + return MixedConv2d(in_chs, out_chs, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + groups = out_chs if depthwise else 1 + return conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs) diff --git a/autoPyTorch/components/networks/image/utils/mobilenet_utils.py b/autoPyTorch/components/networks/image/utils/mobilenet_utils.py new file mode 100644 index 000000000..4218554bb --- /dev/null +++ b/autoPyTorch/components/networks/image/utils/mobilenet_utils.py @@ -0,0 +1,753 @@ +# Copyright 2019 Ross Wightman +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +""" Generic EfficientNets +A generic class with building blocks to support a variety of models with efficient architectures: +* EfficientNet (B0-B7) +* MixNet (Small, Medium, and Large) +* MnasNet B1, A1 (SE), Small +* MobileNet V1, V2, and V3 +* FBNet-C (TODO A & B) +* ChamNet (TODO still guessing at architecture definition) +* Single-Path NAS Pixel1 +* And likely more... +TODO not all combinations and variations have been tested. Currently working on training hyper-params... +Hacked together by Ross Wightman +""" + +import math +import re +import logging +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .conv2d_helpers import select_conv2d + + +IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406) +IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225) + + +__all__ = ['GenEfficientNet'] + + +def _cfg(url='', **kwargs): + return { + 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), + 'crop_pct': 0.875, 'interpolation': 'bicubic', + 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, + 'first_conv': 'conv_stem', 'classifier': 'classifier', + **kwargs + } + + +_DEBUG = False + +# Default args for PyTorch BN impl +_BN_MOMENTUM_PT_DEFAULT = 0.01 +_BN_EPS_PT_DEFAULT = 1e-5 +_BN_ARGS_PT = dict(momentum=_BN_MOMENTUM_PT_DEFAULT, eps=_BN_EPS_PT_DEFAULT) + +# Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per +# papers and TF reference implementations. 
PT momentum equiv for TF decay is (1 - TF decay) +# NOTE: momentum varies btw .99 and .9997 depending on source +# .99 in official TF TPU impl +# .9997 (/w .999 in search space) for paper +# HERE CHANGED TO WORK WITH PYTORCH +_BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 +_BN_EPS_TF_DEFAULT = 1e-3 +_BN_ARGS_TF = dict(momentum=_BN_MOMENTUM_TF_DEFAULT, eps=_BN_EPS_TF_DEFAULT) + + +def adaptive_pool_feat_mult(pool_type='avg'): + if pool_type == 'catavgmax': + return 2 + else: + return 1 + + +def adaptive_avgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return 0.5 * (x_avg + x_max) + + +def adaptive_catavgmax_pool2d(x, output_size=1): + x_avg = F.adaptive_avg_pool2d(x, output_size) + x_max = F.adaptive_max_pool2d(x, output_size) + return torch.cat((x_avg, x_max), 1) + + +def select_adaptive_pool2d(x, pool_type='avg', output_size=1): + """Selectable global pooling function with dynamic input kernel size + """ + if pool_type == 'avg': + x = F.adaptive_avg_pool2d(x, output_size) + elif pool_type == 'avgmax': + x = adaptive_avgmax_pool2d(x, output_size) + elif pool_type == 'catavgmax': + x = adaptive_catavgmax_pool2d(x, output_size) + elif pool_type == 'max': + x = F.adaptive_max_pool2d(x, output_size) + else: + assert False, 'Invalid pool type: %s' % pool_type + return x + + +class AdaptiveAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_avgmax_pool2d(x, self.output_size) + + +class AdaptiveCatAvgMaxPool2d(nn.Module): + def __init__(self, output_size=1): + super(AdaptiveCatAvgMaxPool2d, self).__init__() + self.output_size = output_size + + def forward(self, x): + return adaptive_catavgmax_pool2d(x, self.output_size) + + +class SelectAdaptivePool2d(nn.Module): + """Selectable global pooling layer with dynamic input kernel size + """ + def __init__(self, output_size=1, pool_type='avg'): + super(SelectAdaptivePool2d, self).__init__() + self.output_size = output_size + self.pool_type = pool_type + if pool_type == 'avgmax': + self.pool = AdaptiveAvgMaxPool2d(output_size) + elif pool_type == 'catavgmax': + self.pool = AdaptiveCatAvgMaxPool2d(output_size) + elif pool_type == 'max': + self.pool = nn.AdaptiveMaxPool2d(output_size) + else: + if pool_type != 'avg': + assert False, 'Invalid pool type: %s' % pool_type + self.pool = nn.AdaptiveAvgPool2d(output_size) + + def forward(self, x): + return self.pool(x) + + def feat_mult(self): + return adaptive_pool_feat_mult(self.pool_type) + + def __repr__(self): + return self.__class__.__name__ + ' (' \ + + 'output_size=' + str(self.output_size) \ + + ', pool_type=' + self.pool_type + ')' + + +def _resolve_bn_args(kwargs): + bn_args = _BN_ARGS_TF.copy() if kwargs.pop('bn_tf', False) else _BN_ARGS_PT.copy() + bn_momentum = kwargs.pop('bn_momentum', None) + if bn_momentum is not None: + bn_args['momentum'] = bn_momentum + bn_eps = kwargs.pop('bn_eps', None) + if bn_eps is not None: + bn_args['eps'] = bn_eps + return bn_args + + +def _round_channels(channels, multiplier=1.0, divisor=8, channel_min=None): + """Round number of filters based on depth multiplier.""" + if not multiplier: + return channels + + channels *= multiplier + channel_min = channel_min or divisor + new_channels = max( + int(channels + divisor / 2) // divisor * divisor, + channel_min) + # Make sure that round down does not go down by more than 10%. 
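    # Worked example (assumed values): channels=32 with multiplier=1.12 and divisor=8 gives
    # 35.84, which rounds down to 32; since 32 < 0.9 * 35.84 the divisor is added back and
    # 40 is returned. With multiplier=1.0 the value simply stays at 32.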
+ if new_channels < 0.9 * channels: + new_channels += divisor + return new_channels + + +def _parse_ksize(ss): + if ss.isdigit(): + return int(ss) + else: + return [int(k) for k in ss.split('.')] + + +def _decode_block_str(block_str, depth_multiplier=1.0): + """ Decode block definition string + Gets a list of block arg (dicts) through a string notation of arguments. + E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip + All args can exist in any order with the exception of the leading string which + is assumed to indicate the block type. + leading string - block type ( + ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) + r - number of repeat blocks, + k - kernel size, + s - strides (1-9), + e - expansion ratio, + c - output channels, + se - squeeze/excitation ratio + n - activation fn ('re', 'r6', 'hs', or 'sw') + Args: + block_str: a string representation of block arguments. + Returns: + A list of block args (dicts) + Raises: + ValueError: if the string def not properly specified (TODO) + """ + assert isinstance(block_str, str) + ops = block_str.split('_') + block_type = ops[0] # take the block type off the front + ops = ops[1:] + options = {} + noskip = False + for op in ops: + # string options being checked on individual basis, combine if they grow + if op == 'noskip': + noskip = True + elif op.startswith('n'): + # activation fn + key = op[0] + v = op[1:] + if v == 're': + value = F.relu + elif v == 'r6': + value = F.relu6 + elif v == 'hs': + value = hard_swish + elif v == 'sw': + value = swish + else: + continue + options[key] = value + else: + # all numeric options + splits = re.split(r'(\d.*)', op) + if len(splits) >= 2: + key, value = splits[:2] + options[key] = value + + # if act_fn is None, the model default (passed to model init) will be used + act_fn = options['n'] if 'n' in options else None + exp_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 + pw_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 + + num_repeat = int(options['r']) + # each type of block has different valid arguments, fill accordingly + if block_type == 'ir': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + exp_kernel_size=exp_kernel_size, + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + exp_ratio=float(options['e']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_fn=act_fn, + noskip=noskip, + ) + elif block_type == 'ds' or block_type == 'dsa': + block_args = dict( + block_type=block_type, + dw_kernel_size=_parse_ksize(options['k']), + pw_kernel_size=pw_kernel_size, + out_chs=int(options['c']), + se_ratio=float(options['se']) if 'se' in options else None, + stride=int(options['s']), + act_fn=act_fn, + pw_act=block_type == 'dsa', + noskip=block_type == 'dsa' or noskip, + ) + elif block_type == 'cn': + block_args = dict( + block_type=block_type, + kernel_size=int(options['k']), + out_chs=int(options['c']), + stride=int(options['s']), + act_fn=act_fn, + ) + else: + assert False, 'Unknown block type (%s)' % block_type + + # return a list of block args expanded by num_repeat and + # scaled by depth_multiplier + num_repeat = int(math.ceil(num_repeat * depth_multiplier)) + return [deepcopy(block_args) for _ in range(num_repeat)] + + +def _decode_arch_def(arch_def, depth_multiplier=1.0): + arch_args = [] + for stack_idx, block_strings in enumerate(arch_def): + assert isinstance(block_strings, list) + stack_args = [] + for block_str in 
block_strings: + assert isinstance(block_str, str) + stack_args.extend(_decode_block_str(block_str, depth_multiplier)) + arch_args.append(stack_args) + return arch_args + + +def swish(x, inplace=False): + if inplace: + return x.mul_(x.sigmoid()) + else: + return x * x.sigmoid() + + +def sigmoid(x, inplace=False): + return x.sigmoid_() if inplace else x.sigmoid() + + +def hard_swish(x, inplace=False): + if inplace: + return x.mul_(F.relu6(x + 3.) / 6.) + else: + return x * F.relu6(x + 3.) / 6. + + +def hard_sigmoid(x, inplace=False): + if inplace: + return x.add_(3.).clamp_(0., 6.).div_(6.) + else: + return F.relu6(x + 3.) / 6. + + +class _BlockBuilder: + """ Build Trunk Blocks + This ended up being somewhat of a cross between + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py + and + https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py + """ + def __init__(self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, + pad_type='', act_fn=None, se_gate_fn=sigmoid, se_reduce_mid=False, + bn_args=_BN_ARGS_PT, drop_connect_rate=0., verbose=False): + self.channel_multiplier = channel_multiplier + self.channel_divisor = channel_divisor + self.channel_min = channel_min + self.pad_type = pad_type + self.act_fn = act_fn + self.se_gate_fn = se_gate_fn + self.se_reduce_mid = se_reduce_mid + self.bn_args = bn_args + self.drop_connect_rate = drop_connect_rate + self.verbose = verbose + + # updated during build + self.in_chs = None + self.block_idx = 0 + self.block_count = 0 + + def _round_channels(self, chs): + return _round_channels(chs, self.channel_multiplier, self.channel_divisor, self.channel_min) + + def _make_block(self, ba): + bt = ba.pop('block_type') + ba['in_chs'] = self.in_chs + ba['out_chs'] = self._round_channels(ba['out_chs']) + ba['bn_args'] = self.bn_args + ba['pad_type'] = self.pad_type + # block act fn overrides the model default + ba['act_fn'] = ba['act_fn'] if ba['act_fn'] is not None else self.act_fn + assert ba['act_fn'] is not None + if bt == 'ir': + ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count + ba['se_gate_fn'] = self.se_gate_fn + ba['se_reduce_mid'] = self.se_reduce_mid + if self.verbose: + logging.info(' InvertedResidual {}, Args: {}'.format(self.block_idx, str(ba))) + block = InvertedResidual(**ba) + elif bt == 'ds' or bt == 'dsa': + ba['drop_connect_rate'] = self.drop_connect_rate * self.block_idx / self.block_count + if self.verbose: + logging.info(' DepthwiseSeparable {}, Args: {}'.format(self.block_idx, str(ba))) + block = DepthwiseSeparableConv(**ba) + elif bt == 'cn': + if self.verbose: + logging.info(' ConvBnAct {}, Args: {}'.format(self.block_idx, str(ba))) + block = ConvBnAct(**ba) + else: + assert False, 'Uknkown block type (%s) while building model.' 
% bt + self.in_chs = ba['out_chs'] # update in_chs for arg of next block + + return block + + def _make_stack(self, stack_args): + blocks = [] + # each stack (stage) contains a list of block arguments + for i, ba in enumerate(stack_args): + if self.verbose: + logging.info(' Block: {}'.format(i)) + if i >= 1: + # only the first block in any stack can have a stride > 1 + ba['stride'] = 1 + block = self._make_block(ba) + blocks.append(block) + self.block_idx += 1 # incr global idx (across all stacks) + return nn.Sequential(*blocks) + + def __call__(self, in_chs, block_args): + """ Build the blocks + Args: + in_chs: Number of input-channels passed to first block + block_args: A list of lists, outer list defines stages, inner + list contains strings defining block configuration(s) + Return: + List of block stacks (each stack wrapped in nn.Sequential) + """ + if self.verbose: + logging.info('Building model trunk with %d stages...' % len(block_args)) + self.in_chs = in_chs + self.block_count = sum([len(x) for x in block_args]) + self.block_idx = 0 + blocks = [] + # outer list of block_args defines the stacks ('stages' by some conventions) + for stack_idx, stack in enumerate(block_args): + if self.verbose: + logging.info('Stack: {}'.format(stack_idx)) + assert isinstance(stack, list) + stack = self._make_stack(stack) + blocks.append(stack) + return blocks + + +def _initialize_weight_goog(m): + # weight init as per Tensorflow Official impl + # https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # fan-out + m.weight.data.normal_(0, math.sqrt(2.0 / n)) + if m.bias is not None: + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + n = m.weight.size(0) # fan-out + init_range = 1.0 / math.sqrt(n) + m.weight.data.uniform_(-init_range, init_range) + m.bias.data.zero_() + + +def _initialize_weight_default(m): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1.0) + m.bias.data.zero_() + elif isinstance(m, nn.Linear): + nn.init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='linear') + + +def drop_connect(inputs, training=False, drop_connect_rate=0.): + """Apply drop connect.""" + if not training: + return inputs + + keep_prob = 1 - drop_connect_rate + random_tensor = keep_prob + torch.rand( + (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device) + random_tensor.floor_() # binarize + output = inputs.div(keep_prob) * random_tensor + return output + + +class ChannelShuffle(nn.Module): + # FIXME haven't used yet + def __init__(self, groups): + super(ChannelShuffle, self).__init__() + self.groups = groups + + def forward(self, x): + """Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]""" + N, C, H, W = x.size() + g = self.groups + assert C % g == 0, "Incompatible group size {} for input channel {}".format( + g, C + ) + return ( + x.view(N, g, int(C / g), H, W) + .permute(0, 2, 1, 3, 4) + .contiguous() + .view(N, C, H, W) + ) + + +class SqueezeExcite(nn.Module): + def __init__(self, in_chs, reduce_chs=None, act_fn=F.relu, gate_fn=sigmoid): + super(SqueezeExcite, self).__init__() + self.act_fn = act_fn + self.gate_fn = gate_fn + reduced_chs = reduce_chs or in_chs + self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True) + 
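        # Squeeze-and-excitation: forward() below averages each channel over its spatial
        # positions ("squeeze"), passes the result through the conv_reduce -> act_fn ->
        # conv_expand bottleneck, and multiplies the input by the gated output
        # ("excitation"), rescaling channels adaptively.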
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True) + + def forward(self, x): + # NOTE adaptiveavgpool can be used here, but seems to cause issues with NVIDIA AMP performance + x_se = x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1) + x_se = self.conv_reduce(x_se) + x_se = self.act_fn(x_se, inplace=True) + x_se = self.conv_expand(x_se) + x = x * self.gate_fn(x_se) + return x + + +class ConvBnAct(nn.Module): + def __init__(self, in_chs, out_chs, kernel_size, + stride=1, pad_type='', act_fn=F.relu, bn_args=_BN_ARGS_PT): + super(ConvBnAct, self).__init__() + assert stride in [1, 2] + self.act_fn = act_fn + self.conv = select_conv2d(in_chs, out_chs, kernel_size, stride=stride, padding=pad_type) + self.bn1 = nn.BatchNorm2d(out_chs, **bn_args) + + def forward(self, x): + x = self.conv(x) + x = self.bn1(x) + x = self.act_fn(x, inplace=True) + return x + + +class DepthwiseSeparableConv(nn.Module): + """ DepthwiseSeparable block + Used for DS convs in MobileNet-V1 and in the place of IR blocks with an expansion + factor of 1.0. This is an alternative to having a IR with optional first pw conv. + """ + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, pad_type='', act_fn=F.relu, noskip=False, + pw_kernel_size=1, pw_act=False, + se_ratio=0., se_gate_fn=sigmoid, + bn_args=_BN_ARGS_PT, drop_connect_rate=0.): + super(DepthwiseSeparableConv, self).__init__() + assert stride in [1, 2] + self.has_se = se_ratio is not None and se_ratio > 0. + self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip + self.has_pw_act = pw_act # activation after point-wise conv + self.act_fn = act_fn + self.drop_connect_rate = drop_connect_rate + + self.conv_dw = select_conv2d( + in_chs, in_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True) + self.bn1 = nn.BatchNorm2d(in_chs, **bn_args) + + # Squeeze-and-excitation + if self.has_se: + self.se = SqueezeExcite( + in_chs, reduce_chs=max(1, int(in_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn) + + self.conv_pw = select_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn2 = nn.BatchNorm2d(out_chs, **bn_args) + + def forward(self, x): + residual = x + + x = self.conv_dw(x) + x = self.bn1(x) + x = self.act_fn(x, inplace=True) + + if self.has_se: + x = self.se(x) + + x = self.conv_pw(x) + x = self.bn2(x) + if self.has_pw_act: + x = self.act_fn(x, inplace=True) + + if self.has_residual: + if self.drop_connect_rate > 0.: + x = drop_connect(x, self.training, self.drop_connect_rate) + x += residual + return x + + +class InvertedResidual(nn.Module): + """ Inverted residual block w/ optional SE""" + + def __init__(self, in_chs, out_chs, dw_kernel_size=3, + stride=1, pad_type='', act_fn=F.relu, noskip=False, + exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, + se_ratio=0., se_reduce_mid=False, se_gate_fn=sigmoid, + shuffle_type=None, bn_args=_BN_ARGS_PT, drop_connect_rate=0.): + super(InvertedResidual, self).__init__() + mid_chs = int(in_chs * exp_ratio) + self.has_se = se_ratio is not None and se_ratio > 0. 
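        # Inverted residual (MBConv-style) layout assembled below: 1x1 point-wise expansion
        # to mid_chs, depth-wise convolution at the given stride, optional squeeze-and-
        # excitation, then a 1x1 linear projection back to out_chs; the identity skip is
        # only used when stride == 1 and in_chs == out_chs (has_residual below).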
+ self.has_residual = (in_chs == out_chs and stride == 1) and not noskip + self.act_fn = act_fn + self.drop_connect_rate = drop_connect_rate + + # Point-wise expansion + self.conv_pw = select_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type) + self.bn1 = nn.BatchNorm2d(mid_chs, **bn_args) + + self.shuffle_type = shuffle_type + if shuffle_type is not None and isinstance(exp_kernel_size, list): + self.shuffle = ChannelShuffle(len(exp_kernel_size)) + + # Depth-wise convolution + self.conv_dw = select_conv2d( + mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=pad_type, depthwise=True) + self.bn2 = nn.BatchNorm2d(mid_chs, **bn_args) + + # Squeeze-and-excitation + if self.has_se: + se_base_chs = mid_chs if se_reduce_mid else in_chs + self.se = SqueezeExcite( + mid_chs, reduce_chs=max(1, int(se_base_chs * se_ratio)), act_fn=act_fn, gate_fn=se_gate_fn) + + # Point-wise linear projection + self.conv_pwl = select_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type) + self.bn3 = nn.BatchNorm2d(out_chs, **bn_args) + + def forward(self, x): + residual = x + + # Point-wise expansion + x = self.conv_pw(x) + x = self.bn1(x) + x = self.act_fn(x, inplace=True) + + # FIXME haven't tried this yet + # for channel shuffle when using groups with pointwise convs as per FBNet variants + if self.shuffle_type == "mid": + x = self.shuffle(x) + + # Depth-wise convolution + x = self.conv_dw(x) + x = self.bn2(x) + x = self.act_fn(x, inplace=True) + + # Squeeze-and-excitation + if self.has_se: + x = self.se(x) + + # Point-wise linear projection + x = self.conv_pwl(x) + x = self.bn3(x) + + if self.has_residual: + if self.drop_connect_rate > 0.: + x = drop_connect(x, self.training, self.drop_connect_rate) + x += residual + + # NOTE maskrcnn_benchmark building blocks have an SE module defined here for some variants + + return x + + +class GenEfficientNet(nn.Module): + """ Generic EfficientNet + An implementation of efficient network architectures, in many cases mobile optimized networks: + * MobileNet-V1 + * MobileNet-V2 + * MobileNet-V3 + * MnasNet A1, B1, and small + * FBNet A, B, and C + * ChamNet (arch details are murky) + * Single-Path NAS Pixel1 + * EfficientNet B0-B7 + * MixNet S, M, L + """ + + def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=32, num_features=1280, + channel_multiplier=1.0, channel_divisor=8, channel_min=None, + pad_type='', act_fn=F.relu, drop_rate=0., drop_connect_rate=0., + se_gate_fn=sigmoid, se_reduce_mid=False, bn_args=_BN_ARGS_PT, + global_pool='avg', head_conv='default', weight_init='goog'): + super(GenEfficientNet, self).__init__() + self.num_classes = num_classes + self.drop_rate = drop_rate + self.act_fn = act_fn + self.num_features = num_features + + stem_size = _round_channels(stem_size, channel_multiplier, channel_divisor, channel_min) + self.conv_stem = select_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) + self.bn1 = nn.BatchNorm2d(stem_size, **bn_args) + in_chs = stem_size + + builder = _BlockBuilder( + channel_multiplier, channel_divisor, channel_min, + pad_type, act_fn, se_gate_fn, se_reduce_mid, + bn_args, drop_connect_rate, verbose=_DEBUG) + self.blocks = nn.Sequential(*builder(in_chs, block_args)) + in_chs = builder.in_chs + + if not head_conv or head_conv == 'none': + self.efficient_head = False + self.conv_head = None + assert in_chs == self.num_features + else: + self.efficient_head = head_conv == 'efficient' + self.conv_head = select_conv2d(in_chs, self.num_features, 1, padding=pad_type) + self.bn2 = None if 
self.efficient_head else nn.BatchNorm2d(self.num_features, **bn_args) + + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes) + + for m in self.modules(): + if weight_init == 'goog': + _initialize_weight_goog(m) + else: + _initialize_weight_default(m) + + def get_classifier(self): + return self.classifier + + def reset_classifier(self, num_classes, global_pool='avg'): + self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) + self.num_classes = num_classes + del self.classifier + if num_classes: + self.classifier = nn.Linear( + self.num_features * self.global_pool.feat_mult(), num_classes) + else: + self.classifier = None + + def forward_features(self, x, pool=True): + x = self.conv_stem(x) + x = self.bn1(x) + x = self.act_fn(x, inplace=True) + x = self.blocks(x) + if self.efficient_head: + # efficient head, currently only mobilenet-v3 performs pool before last 1x1 conv + x = self.global_pool(x) # always need to pool here regardless of flag + x = self.conv_head(x) + # no BN + x = self.act_fn(x, inplace=True) + if pool: + # expect flattened output if pool is true, otherwise keep dim + x = x.view(x.size(0), -1) + else: + if self.conv_head is not None: + x = self.conv_head(x) + x = self.bn2(x) + x = self.act_fn(x, inplace=True) + if pool: + x = self.global_pool(x) + x = x.view(x.size(0), -1) + return x + + def forward(self, x): + x = self.forward_features(x) + if self.drop_rate > 0.: + x = F.dropout(x, p=self.drop_rate, training=self.training) + return self.classifier(x) + diff --git a/autoPyTorch/components/networks/image/utils/shakedrop.py b/autoPyTorch/components/networks/image/utils/shakedrop.py new file mode 100644 index 000000000..3cfa6d3f4 --- /dev/null +++ b/autoPyTorch/components/networks/image/utils/shakedrop.py @@ -0,0 +1,60 @@ +import torch +from torch.autograd import Variable, Function + + +class ShakeDrop(Function): + @staticmethod + def forward(ctx, x, alpha, beta, death_rate, is_train): + gate = (torch.rand(1) > death_rate).numpy() + ctx.gate = gate + ctx.save_for_backward(x, alpha, beta) + + if is_train: + if not gate: + y = alpha * x + else: + y = x + else: + y = x.mul(1 - (death_rate * 1.0)) + + return y + + @staticmethod + def backward(ctx, grad_output): + x, alpha, beta = ctx.saved_variables + grad_x1 = grad_alpha = grad_beta = None + + if ctx.needs_input_grad[0]: + if not ctx.gate: + grad_x = grad_output * beta + else: + grad_x = grad_output + + return grad_x, grad_alpha, grad_beta, None, None + +shake_drop = ShakeDrop.apply + + +def generate_alpha_beta_single(tensor_size, shake_config, is_cuda): + forward_shake, backward_shake, shake_image = shake_config + + if forward_shake and not shake_image: + alpha = torch.rand(tensor_size).mul(2).add(-1) + elif forward_shake and shake_image: + alpha = torch.rand(tensor_size[0]).view(tensor_size[0], 1, 1, 1) + alpha.mul_(2).add_(-1) # alpha from -1 to 1 + else: + alpha = torch.FloatTensor([0.5]) + + if backward_shake and not shake_image: + beta = torch.rand(tensor_size) + elif backward_shake and shake_image: + beta = torch.rand(tensor_size[0]).view(tensor_size[0], 1, 1, 1) + else: + beta = torch.FloatTensor([0.5]) + + if is_cuda: + alpha = alpha.cuda() + beta = beta.cuda() + + return Variable(alpha), Variable(beta) \ No newline at end of file diff --git a/autoPyTorch/components/networks/image/utils/shakeshakeblock.py b/autoPyTorch/components/networks/image/utils/shakeshakeblock.py new file mode 100644 index 
000000000..4ebc5085b --- /dev/null +++ b/autoPyTorch/components/networks/image/utils/shakeshakeblock.py @@ -0,0 +1,49 @@ +# coding: utf-8 + +import torch +from torch.autograd import Variable, Function + + +class ShakeShakeBlock(Function): + @staticmethod + def forward(ctx, alpha, beta, *args): + ctx.save_for_backward(beta) + + y = sum(alpha[i] * args[i] for i in range(len(args))) + return y + + @staticmethod + def backward(ctx, grad_output): + beta = ctx.saved_variables + grad_x = [beta[0][i] * grad_output for i in range(beta[0].shape[0])] + + return (None, None, *grad_x) + +shake_shake = ShakeShakeBlock.apply + + +def generate_alpha_beta(num_branches, batch_size, shake_config, is_cuda): + forward_shake, backward_shake, shake_image = shake_config + + if forward_shake and not shake_image: + alpha = torch.rand(num_branches) + elif forward_shake and shake_image: + alpha = torch.rand(num_branches, batch_size).view(num_branches, batch_size, 1, 1, 1) + else: + alpha = torch.ones(num_branches) + + if backward_shake and not shake_image: + beta = torch.rand(num_branches) + elif backward_shake and shake_image: + beta = torch.rand(num_branches, batch_size).view(num_branches, batch_size, 1, 1, 1) + else: + beta = torch.ones(num_branches) + + alpha = torch.nn.Softmax(0)(Variable(alpha)) + beta = torch.nn.Softmax(0)(Variable(beta)) + + if is_cuda: + alpha = alpha.cuda() + beta = beta.cuda() + + return alpha, beta \ No newline at end of file diff --git a/autoPyTorch/components/networks/image/utils/utils.py b/autoPyTorch/components/networks/image/utils/utils.py new file mode 100644 index 000000000..c743b4eb0 --- /dev/null +++ b/autoPyTorch/components/networks/image/utils/utils.py @@ -0,0 +1,44 @@ +import torch.nn as nn +import math + +def initialize_weights(module): + if isinstance(module, nn.Conv2d): + n = module.kernel_size[0] * module.kernel_size[1] * module.out_channels + module.weight.data.normal_(0, math.sqrt(2. 
/ n)) + #nn.init.kaiming_normal(module.weight.data, mode='fan_out') + elif isinstance(module, nn.BatchNorm2d): + module.weight.data.fill_(1) + module.bias.data.zero_() + elif isinstance(module, nn.Linear): + module.bias.data.zero_() + +def get_layer_params(in_size, out_size, kernel_size): + kernel_size = int(kernel_size) + stride = int(max(1, math.ceil((in_size - kernel_size) / (out_size - 1)) if out_size > 1 else 1)) + cur_out_size = _get_out_size(in_size, kernel_size, stride, 0) + required_padding = (stride / 2) * (in_size - cur_out_size) + + cur_padding = int(math.ceil(required_padding)) + cur_out_size = _get_out_size(in_size, kernel_size, stride, cur_padding) + if cur_padding < kernel_size and cur_out_size <= in_size and cur_out_size >= 1: + return cur_out_size, kernel_size, stride, cur_padding + + cur_padding = int(math.floor(required_padding)) + cur_out_size = _get_out_size(in_size, kernel_size, stride, cur_padding) + if cur_padding < kernel_size and cur_out_size <= in_size and cur_out_size >= 1: + return cur_out_size, kernel_size, stride, cur_padding + + if stride > 1: + stride = int(stride - 1) + cur_padding = 0 + cur_out_size = int(_get_out_size(in_size, kernel_size, stride, cur_padding)) + if cur_padding < kernel_size and cur_out_size <= in_size and cur_out_size >= 1: + return cur_out_size, kernel_size, stride, cur_padding + + if (kernel_size % 2) == 0 and out_size == in_size: + return get_layer_params(in_size, out_size, kernel_size + 1) # an odd kernel can always keep the dimension (with stride 1) + + raise Exception('Could not find padding and stride to reduce ' + str(in_size) + ' to ' + str(out_size) + ' using kernel ' + str(kernel_size)) + +def _get_out_size(in_size, kernel_size, stride, padding): + return int(math.floor((in_size - kernel_size + 2 * padding) / stride + 1)) \ No newline at end of file diff --git a/autoPyTorch/components/optimizer/optimizer.py b/autoPyTorch/components/optimizer/optimizer.py index 7d5c0c86b..21197b9d0 100644 --- a/autoPyTorch/components/optimizer/optimizer.py +++ b/autoPyTorch/components/optimizer/optimizer.py @@ -11,10 +11,12 @@ import ConfigSpace as CS import ConfigSpace.hyperparameters as CSH + __author__ = "Max Dippel, Michael Burkart and Matthias Urban" __version__ = "0.0.1" __license__ = "BSD" + class AutoNetOptimizerBase(object): def __new__(cls, params, config): return cls._get_optimizer(cls, params, config) @@ -34,8 +36,24 @@ def _get_optimizer(self, params, config): @staticmethod def get_config_space( - learning_rate=((0.0001, 0.1), True), - weight_decay=(0.0001, 0.1) + learning_rate=((1e-4, 0.1), True), + weight_decay=(1e-5, 0.1) + ): + cs = CS.ConfigurationSpace() + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'learning_rate', learning_rate) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'weight_decay', weight_decay) + return cs + + +class AdamWOptimizer(AutoNetOptimizerBase): + + def _get_optimizer(self, params, config): + return optim.AdamW(params=params, lr=config['learning_rate'], weight_decay=config['weight_decay']) + + @staticmethod + def get_config_space( + learning_rate=((1e-4, 0.1), True), + weight_decay=(1e-5, 0.1) ): cs = CS.ConfigurationSpace() add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'learning_rate', learning_rate) @@ -50,12 +68,32 @@ def _get_optimizer(self, params, config): @staticmethod def get_config_space( - learning_rate=((0.0001, 0.1), True), - momentum=((0.1, 0.9), True), - weight_decay=(0.0001, 0.1) + learning_rate=((1e-4, 0.1), True), + momentum=((0.1, 0.99), True), + 
weight_decay=(1e-5, 0.1) + ): + cs = CS.ConfigurationSpace() + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'learning_rate', learning_rate) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'momentum', momentum) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'weight_decay', weight_decay) + return cs + + +class RMSpropOptimizer(AutoNetOptimizerBase): + + def _get_optimizer(self, params, config): + return optim.RMSprop(params=params, lr=config['learning_rate'], momentum=config['momentum'], weight_decay=config['weight_decay'], centered=False) + + @staticmethod + def get_config_space( + learning_rate=((1e-4, 0.1), True), + momentum=((0.1, 0.99), True), + weight_decay=(1e-5, 0.1), + alpha=(0.1,0.99) ): cs = CS.ConfigurationSpace() add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'learning_rate', learning_rate) add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'momentum', momentum) add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'weight_decay', weight_decay) + add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'alpha', alpha) return cs diff --git a/autoPyTorch/components/preprocessing/image_preprocessing/__init__.py b/autoPyTorch/components/preprocessing/image_preprocessing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/components/preprocessing/image_preprocessing/archive.py b/autoPyTorch/components/preprocessing/image_preprocessing/archive.py new file mode 100644 index 000000000..6a12871a6 --- /dev/null +++ b/autoPyTorch/components/preprocessing/image_preprocessing/archive.py @@ -0,0 +1,28 @@ +# Copyright 2019 Kakao Brain +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
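# The policy lists below follow the AutoAugment / Fast AutoAugment convention: each entry is
# a sub-policy of two operations, and each operation is written as
# [operation_name, probability, magnitude] with both values in [0, 1]; magnitudes are
# typically rescaled to operation-specific ranges by the transforms that consume these
# policies.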
+ + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +def fa_reduced_cifar10(): + p = [[["Contrast", 0.8320659688593578, 0.49884310562180767], ["TranslateX", 0.41849883971249136, 0.394023086494538]], [["Color", 0.3500483749890918, 0.43355143929883955], ["Color", 0.5120716140300229, 0.7508299643325016]], [["Rotate", 0.9447932604389472, 0.29723465088990375], ["Sharpness", 0.1564936149799504, 0.47169309978091745]], [["Rotate", 0.5430015349185097, 0.6518626678905443], ["Color", 0.5694844928020679, 0.3494533005430269]], [["AutoContrast", 0.5558922032451064, 0.783136004977799], ["TranslateY", 0.683914191471972, 0.7597025305860181]], [["TranslateX", 0.03489224481658926, 0.021025488042663354], ["Equalize", 0.4788637403857401, 0.3535481281496117]], [["Sharpness", 0.6428916269794158, 0.22791511918580576], ["Contrast", 0.016014045073950323, 0.26811312269487575]], [["Rotate", 0.2972727228410451, 0.7654251516829896], ["AutoContrast", 0.16005809254943348, 0.5380523650108116]], [["Contrast", 0.5823671057717301, 0.7521166301398389], ["TranslateY", 0.9949449214751978, 0.9612671341689751]], [["Equalize", 0.8372126687702321, 0.6944127225621206], ["Rotate", 0.25393282929784755, 0.3261658365286546]], [["Invert", 0.8222011603194572, 0.6597915864008403], ["Posterize", 0.31858707654447327, 0.9541013715579584]], [["Sharpness", 0.41314621282107045, 0.9437344470879956], ["Cutout", 0.6610495837889337, 0.674411664255093]], [["Contrast", 0.780121736705407, 0.40826152397463156], ["Color", 0.344019192125256, 0.1942922781355767]], [["Rotate", 0.17153139555621344, 0.798745732456474], ["Invert", 0.6010555860501262, 0.320742172554767]], [["Invert", 0.26816063450777416, 0.27152062163148327], ["Equalize", 0.6786829200236982, 0.7469412443514213]], [["Contrast", 0.3920564414367518, 0.7493644582838497], ["TranslateY", 0.8941657805606704, 0.6580846856375955]], [["Equalize", 0.875509207399372, 0.9061130537645283], ["Cutout", 0.4940280679087308, 0.7896229623628276]], [["Contrast", 0.3331423298065147, 0.7170041362529597], ["ShearX", 0.7425484291842793, 0.5285117152426109]], [["Equalize", 0.97344237365026, 0.4745759720473106], ["TranslateY", 0.055863458430295276, 0.9625142022954672]], [["TranslateX", 0.6810614083109192, 0.7509937355495521], ["TranslateY", 0.3866463019475701, 0.5185481505576112]], [["Sharpness", 0.4751529944753671, 0.550464012488733], ["Cutout", 0.9472914750534814, 0.5584925992985023]], [["Contrast", 0.054606784909375095, 0.17257080196712182], ["Cutout", 0.6077026782754803, 0.7996504165944938]], [["ShearX", 0.328798428243695, 0.2769563264079157], ["Cutout", 0.9037632437023772, 0.4915809476763595]], [["Cutout", 0.6891202672363478, 0.9951490996172914], ["Posterize", 0.06532762462628705, 0.4005246609075227]], [["TranslateY", 0.6908583592523334, 0.725612120376128], ["Rotate", 0.39907735501746666, 0.36505798032223147]], [["TranslateX", 0.10398364107399072, 0.5913918470536627], ["Rotate", 0.7169811539340365, 0.8283850670648724]], [["ShearY", 0.9526373530768361, 0.4482347365639251], ["Contrast", 0.4203947336351471, 0.41526799558953864]], [["Contrast", 0.24894431199700073, 0.09578870500994707], ["Solarize", 0.2273713345927395, 0.6214942914963707]], [["TranslateX", 0.06331228870032912, 0.8961907489444944], ["Cutout", 0.5110007859958743, 0.23704875994050723]], [["Cutout", 0.3769183548846172, 0.6560944580253987], ["TranslateY", 0.7201924599434143, 0.4132476526938319]], [["Invert", 0.6707431156338866, 0.11622795952464149], ["Posterize", 0.12075972752370845, 
0.18024933294172307]], [["Color", 0.5010057264087142, 0.5277767327434318], ["Rotate", 0.9486115946366559, 0.31485546630220784]], [["ShearX", 0.31741302466630406, 0.1991215806270692], ["Invert", 0.3744727015523084, 0.6914113986757578]], [["Brightness", 0.40348479064392617, 0.8924182735724888], ["Brightness", 0.1973098763857779, 0.3939288933689655]], [["Color", 0.01208688664030888, 0.6055693000885217], ["Equalize", 0.433259451147881, 0.420711137966155]], [["Cutout", 0.2620018360076487, 0.11594468278143644], ["Rotate", 0.1310401567856766, 0.7244318146544101]], [["ShearX", 0.15249651845933576, 0.35277277071866986], ["Contrast", 0.28221794032094016, 0.42036586509397444]], [["Brightness", 0.8492912150468908, 0.26386920887886056], ["Solarize", 0.8764208056263386, 0.1258195122766067]], [["ShearX", 0.8537058239675831, 0.8415101816171269], ["AutoContrast", 0.23958568830416294, 0.9889049529564014]], [["Rotate", 0.6463207930684552, 0.8750192129056532], ["Contrast", 0.6865032211768652, 0.8564981333033417]], [["Equalize", 0.8877190311811044, 0.7370995897848609], ["TranslateX", 0.9979660314391368, 0.005683998913244781]], [["Color", 0.6420017551677819, 0.6225337265571229], ["Solarize", 0.8344504978566362, 0.8332856969941151]], [["ShearX", 0.7439332981992567, 0.9747608698582039], ["Equalize", 0.6259189804002959, 0.028017478098245174]], [["TranslateY", 0.39794770293366843, 0.8482966537902709], ["Rotate", 0.9312935630405351, 0.5300586925826072]], [["Cutout", 0.8904075572021911, 0.3522934742068766], ["Equalize", 0.6431186289473937, 0.9930577962126151]], [["Contrast", 0.9183553386089476, 0.44974266209396685], ["TranslateY", 0.8193684583123862, 0.9633741156526566]], [["ShearY", 0.616078299924283, 0.19219314358924766], ["Solarize", 0.1480945914138868, 0.05922109541654652]], [["Solarize", 0.25332455064128157, 0.18853037431947994], ["ShearY", 0.9518390093954243, 0.14603930044061142]], [["Color", 0.8094378664335412, 0.37029830225408433], ["Contrast", 0.29504113617467465, 0.065096365468442]], [["AutoContrast", 0.7075167558685455, 0.7084621693458267], ["Sharpness", 0.03555539453323875, 0.5651948313888351]], [["TranslateY", 0.5969982600930229, 0.9857264201029572], ["Rotate", 0.9898628564873607, 0.1985685534926911]], [["Invert", 0.14915939942810352, 0.6595839632446547], ["Posterize", 0.768535289994361, 0.5997358684618563]], [["Equalize", 0.9162691815967111, 0.3331035307653627], ["Color", 0.8169118187605557, 0.7653910258006366]], [["Rotate", 0.43489185299530897, 0.752215269135173], ["Brightness", 0.1569828560334806, 0.8002808712857853]], [["Invert", 0.931876215328345, 0.029428644395760872], ["Equalize", 0.6330036052674145, 0.7235531014288485]], [["ShearX", 0.5216138393704968, 0.849272958911589], ["AutoContrast", 0.19572688655120263, 0.9786551568639575]], [["ShearX", 0.9899586208275011, 0.22580547500610293], ["Brightness", 0.9831311903178727, 0.5055159610855606]], [["Brightness", 0.29179117009211486, 0.48003584672937294], ["Solarize", 0.7544252317330058, 0.05806581735063043]], [["AutoContrast", 0.8919800329537786, 0.8511261613698553], ["Contrast", 0.49199446084551035, 0.7302297140181429]], [["Cutout", 0.7079723710644835, 0.032565015538375874], ["AutoContrast", 0.8259782090388609, 0.7860708789468442]], [["Posterize", 0.9980262659801914, 0.6725084224935673], ["ShearY", 0.6195568269664682, 0.5444170291816751]], [["Posterize", 0.8687351834713217, 0.9978004914422602], ["Equalize", 0.4532646848325955, 0.6486748015710573]], [["Contrast", 0.2713928776950594, 0.15255249557027806], ["ShearY", 0.9276834387970199, 
0.5266542862333478]], [["AutoContrast", 0.5240786618055582, 0.9325642258930253], ["Cutout", 0.38448627892037357, 0.21219415055662394]], [["TranslateX", 0.4299517937295352, 0.20133751201386152], ["TranslateX", 0.6753468310276597, 0.6985621035400441]], [["Rotate", 0.4006472499103597, 0.6704748473357586], ["Equalize", 0.674161668148079, 0.6528530101705237]], [["Equalize", 0.9139902833674455, 0.9015103149680278], ["Sharpness", 0.7289667720691948, 0.7623606352376232]], [["Cutout", 0.5911267429414259, 0.5953141187177585], ["Rotate", 0.5219064817468504, 0.11085141355857986]], [["TranslateX", 0.3620095133946267, 0.26194039409492476], ["Rotate", 0.3929841359545597, 0.4913406720338047]], [["Invert", 0.5175298901458896, 0.001661410821811482], ["Invert", 0.004656581318332242, 0.8157622192213624]], [["AutoContrast", 0.013609693335051465, 0.9318651749409604], ["Invert", 0.8980844358979592, 0.2268511862780368]], [["ShearY", 0.7717126261142194, 0.09975547983707711], ["Equalize", 0.7808494401429572, 0.4141412091009955]], [["TranslateX", 0.5878675721341552, 0.29813268038163376], ["Posterize", 0.21257276051591356, 0.2837285296666412]], [["Brightness", 0.4268335108566488, 0.4723784991635417], ["Cutout", 0.9386262901570471, 0.6597686851494288]], [["ShearX", 0.8259423807590159, 0.6215304795389204], ["Invert", 0.6663365779667443, 0.7729669184580387]], [["ShearY", 0.4801338723951297, 0.5220145420100984], ["Solarize", 0.9165803796596582, 0.04299335502862134]], [["Color", 0.17621114853558817, 0.7092601754635434], ["ShearX", 0.9014406936728542, 0.6028711944367818]], [["Rotate", 0.13073284972300658, 0.9088831512880851], ["ShearX", 0.4228105332316806, 0.7985249783662675]], [["Brightness", 0.9182753692730031, 0.0063635477774044436], ["Color", 0.4279825602663798, 0.28727149118585327]], [["Equalize", 0.578218285372267, 0.9611758542158054], ["Contrast", 0.5471552264150691, 0.8819635504027596]], [["Brightness", 0.3208589067274543, 0.45324733565167497], ["Solarize", 0.5218455808633233, 0.5946097503647126]], [["Equalize", 0.3790381278653, 0.8796082535775276], ["Solarize", 0.4875526773149246, 0.5186585878052613]], [["ShearY", 0.12026461479557571, 0.1336953429068397], ["Posterize", 0.34373988646025766, 0.8557727670803785]], [["Cutout", 0.2396745247507467, 0.8123036135209865], ["Equalize", 0.05022807681008945, 0.6648492261984383]], [["Brightness", 0.35226676470748264, 0.5950011514888855], ["Rotate", 0.27555076067000894, 0.9170063321486026]], [["ShearX", 0.320224630647278, 0.9683584649071976], ["Invert", 0.6905585196648905, 0.5929115667894518]], [["Color", 0.9941395717559652, 0.7474441679798101], ["Sharpness", 0.7559998478658021, 0.6656052889626682]], [["ShearY", 0.4004220568345669, 0.5737646992826074], ["Equalize", 0.9983495213746147, 0.8307907033362303]], [["Color", 0.13726809242038207, 0.9378850119950549], ["Equalize", 0.9853362454752445, 0.42670264496554156]], [["Invert", 0.13514636153298576, 0.13516363849081958], ["Sharpness", 0.2031189356693901, 0.6110226359872745]], [["TranslateX", 0.7360305209630797, 0.41849698571655614], ["Contrast", 0.8972161549144564, 0.7820296625565641]], [["Color", 0.02713118828682548, 0.717110684828096], ["TranslateY", 0.8118759006836348, 0.9120098002024992]], [["Sharpness", 0.2915428949403711, 0.7630303724396518], ["Solarize", 0.22030536162851078, 0.38654526772661757]], [["Equalize", 0.9949114839538582, 0.7193630656062793], ["AutoContrast", 0.00889496657931299, 0.2291400476524672]], [["Rotate", 0.7120948976490488, 0.7804359309791055], ["Cutout", 0.10445418104923654, 0.8022999156052766]], 
[["Equalize", 0.7941710117902707, 0.8648170634288153], ["Invert", 0.9235642581144047, 0.23810725859722381]], [["Cutout", 0.3669397998623156, 0.42612815083245004], ["Solarize", 0.5896322046441561, 0.40525016166956795]], [["Color", 0.8389858785714184, 0.4805764176488667], ["Rotate", 0.7483931487048825, 0.4731174601400677]], [["Sharpness", 0.19006538611394763, 0.9480745790240234], ["TranslateY", 0.13904429049439282, 0.04117685330615939]], [["TranslateY", 0.9958097661701637, 0.34853788612580905], ["Cutout", 0.2235829624082113, 0.3737887095480745]], [["ShearX", 0.635453761342424, 0.6063917273421382], ["Posterize", 0.8738297843709666, 0.4893042590265556]], [["Brightness", 0.7907245198402727, 0.7082189713070691], ["Color", 0.030313003541849737, 0.6927897798493439]], [["Cutout", 0.6965622481073525, 0.8103522907758203], ["ShearY", 0.6186794303078708, 0.28640671575703547]], [["ShearY", 0.43734910588450226, 0.32549342535621517], ["ShearX", 0.08154980987651872, 0.3286764923112455]], [["AutoContrast", 0.5262462005050853, 0.8175584582465848], ["Contrast", 0.8683217097363655, 0.548776281479276]], [["ShearY", 0.03957878500311707, 0.5102350637943197], ["Rotate", 0.13794708520303778, 0.38035687712954236]], [["Sharpness", 0.634288567312677, 0.6387948309075822], ["AutoContrast", 0.13437288694693272, 0.7150448869023095]], [["Contrast", 0.5198339640088544, 0.9409429390321714], ["Cutout", 0.09489154903321972, 0.6228488803821982]], [["Equalize", 0.8955909061806043, 0.7727336527163008], ["AutoContrast", 0.6459479564441762, 0.7065467781139214]], [["Invert", 0.07214420843537739, 0.15334721382249505], ["ShearX", 0.9242027778363903, 0.5809187849982554]], [["Equalize", 0.9144084379856188, 0.9457539278608998], ["Sharpness", 0.14337499858300173, 0.5978054365425495]], [["Posterize", 0.18894269796951202, 0.14676331276539045], ["Equalize", 0.846204299950047, 0.0720601838168885]], [["Contrast", 0.47354445405741163, 0.1793650330107468], ["Solarize", 0.9086106327264657, 0.7578807802091502]], [["AutoContrast", 0.11805466892967886, 0.6773620948318575], ["TranslateX", 0.584222568299264, 0.9475693349391936]], [["Brightness", 0.5833017701352768, 0.6892593824176294], ["AutoContrast", 0.9073141314561828, 0.5823085733964589]], [["TranslateY", 0.5711231614144834, 0.6436240447620021], ["Contrast", 0.21466964050052473, 0.8042843954486391]], [["Contrast", 0.22967904487976765, 0.2343103109298762], ["Invert", 0.5502897289159286, 0.386181060792375]], [["Invert", 0.7008423439928628, 0.4234003051405053], ["Rotate", 0.77270460187611, 0.6650852696828039]], [["Invert", 0.050618322309703534, 0.24277027926683614], ["TranslateX", 0.789703489736613, 0.5116446685339312]], [["Color", 0.363898083076868, 0.7870323584210503], ["ShearY", 0.009608425513626617, 0.6188625018465327]], [["TranslateY", 0.9447601615216088, 0.8605867115798349], ["Equalize", 0.24139180127003634, 0.9587337957930782]], [["Equalize", 0.3968589440144503, 0.626206375426996], ["Solarize", 0.3215967960673186, 0.826785464835443]], [["TranslateX", 0.06947339047121326, 0.016705969558222122], ["Contrast", 0.6203392406528407, 0.6433525559906872]], [["Solarize", 0.2479835265518212, 0.6335009955617831], ["Sharpness", 0.6260191862978083, 0.18998095149428562]], [["Invert", 0.9818841924943431, 0.03252098144087934], ["TranslateY", 0.9740718042586802, 0.32038951753031475]], [["Solarize", 0.8795784664090814, 0.7014953994354041], ["AutoContrast", 0.8508018319577783, 0.09321935255338443]], [["Color", 0.8067046326105318, 0.13732893832354054], ["Contrast", 0.7358549680271418, 0.7880588355974301]], 
[["Posterize", 0.5005885536838065, 0.7152229305267599], ["ShearX", 0.6714249591308944, 0.7732232697859908]], [["TranslateY", 0.5657943483353953, 0.04622399873706862], ["AutoContrast", 0.2787442688649845, 0.567024378767143]], [["ShearY", 0.7589839214283295, 0.041071003934029404], ["Equalize", 0.3719852873722692, 0.43285778682687326]], [["Posterize", 0.8841266183653291, 0.42441306955476366], ["Cutout", 0.06578801759412933, 0.5961125797961526]], [["Rotate", 0.4057875004314082, 0.20241115848366442], ["AutoContrast", 0.19331542807918067, 0.7175484678480565]], [["Contrast", 0.20331327116693088, 0.17135387852218742], ["Cutout", 0.6282459410351067, 0.6690015305529187]], [["ShearX", 0.4309850328306535, 0.99321178125828], ["AutoContrast", 0.01809604030453338, 0.693838277506365]], [["Rotate", 0.24343531125298268, 0.5326412444169899], ["Sharpness", 0.8663989992597494, 0.7643990609130789]], [["Rotate", 0.9785019204622459, 0.8941922576710696], ["ShearY", 0.3823185048761075, 0.9258854046017292]], [["ShearY", 0.5502613342963388, 0.6193478797355644], ["Sharpness", 0.2212116534610532, 0.6648232390110979]], [["TranslateY", 0.43222920981513757, 0.5657636397633089], ["ShearY", 0.9153733286073634, 0.4868521171273169]], [["Posterize", 0.12246560519738336, 0.9132288825898972], ["Cutout", 0.6058471327881816, 0.6426901876150983]], [["Color", 0.3693970222695844, 0.038929141432555436], ["Equalize", 0.6228052875653781, 0.05064436511347281]], [["Color", 0.7172600331356893, 0.2824542634766688], ["Color", 0.425293116261649, 0.1796441283313972]], [["Cutout", 0.7539608428122959, 0.9896141728228921], ["Solarize", 0.17811081117364758, 0.9064195503634402]], [["AutoContrast", 0.6761242607012717, 0.6484842446399923], ["AutoContrast", 0.1978135076901828, 0.42166879492601317]], [["ShearY", 0.25901666379802524, 0.4770778270322449], ["Solarize", 0.7640963173407052, 0.7548463227094349]], [["TranslateY", 0.9222487731783499, 0.33658389819616463], ["Equalize", 0.9159112511468139, 0.8877136302394797]], [["TranslateX", 0.8994836977137054, 0.11036053676846591], ["Sharpness", 0.9040333410652747, 0.007266095214664592]], [["Invert", 0.627758632524958, 0.8075245097227242], ["Color", 0.7525387912148516, 0.05950239294733184]], [["TranslateX", 0.43505193292761857, 0.38108822876120796], ["TranslateY", 0.7432578052364004, 0.685678116134759]], [["Contrast", 0.9293507582470425, 0.052266842951356196], ["Posterize", 0.45187123977747456, 0.8228290399726782]], [["ShearX", 0.07240786542746291, 0.8945667925365756], ["Brightness", 0.5305443506561034, 0.12025274552427578]], [["Invert", 0.40157564448143335, 0.5364745514006678], ["Posterize", 0.3316124671813876, 0.43002413237035997]], [["ShearY", 0.7152314630009072, 0.1938339083417453], ["Invert", 0.14102478508140615, 0.41047623580174253]], [["Equalize", 0.19862832613849246, 0.5058521685279254], ["Sharpness", 0.16481208629549782, 0.29126323102770557]], [["Equalize", 0.6951591703541872, 0.7294822018800076], ["ShearX", 0.8726656726111219, 0.3151484225786487]], [["Rotate", 0.17234370554263745, 0.9356543193000078], ["TranslateX", 0.4954374070084091, 0.05496727345849217]], [["Contrast", 0.347405480122842, 0.831553005022885], ["ShearX", 0.28946367213071134, 0.11905898704394013]], [["Rotate", 0.28096672507990683, 0.16181284050307398], ["Color", 0.6554918515385365, 0.8739728050797386]], [["Solarize", 0.05408073374114053, 0.5357087283758337], ["Posterize", 0.42457175211495335, 0.051807130609045515]], [["TranslateY", 0.6216669362331361, 0.9691341207381867], ["Rotate", 0.9833579358130944, 0.12227426932415297]], 
[["AutoContrast", 0.7572619475282892, 0.8062834082727393], ["Contrast", 0.1447865402875591, 0.40242646573228436]], [["Rotate", 0.7035658783466086, 0.9840285268256428], ["Contrast", 0.04613961510519471, 0.7666683217450163]], [["TranslateX", 0.4580462177951252, 0.6448678609474686], ["AutoContrast", 0.14845695613708987, 0.1581134188537895]], [["Color", 0.06795037145259564, 0.9115552821158709], ["TranslateY", 0.9972953449677655, 0.6791016521791214]], [["Cutout", 0.3586908443690823, 0.11578558293480945], ["Color", 0.49083981719164294, 0.6924851425917189]], [["Brightness", 0.7994717831637873, 0.7887316255321768], ["Posterize", 0.01280463502435425, 0.2799086732858721]], [["ShearY", 0.6733451536131859, 0.8122332639516706], ["AutoContrast", 0.20433889615637357, 0.29023346867819966]], [["TranslateY", 0.709913512385177, 0.6538196931503809], ["Invert", 0.06629795606579203, 0.40913219547548296]], [["Sharpness", 0.4704559834362948, 0.4235993305308414], ["Equalize", 0.7578132044306966, 0.9388824249397175]], [["AutoContrast", 0.5281702802395268, 0.8077253610116979], ["Equalize", 0.856446858814119, 0.0479755681647559]], [["Color", 0.8244145826797791, 0.038409264586238945], ["Equalize", 0.4933123249234237, 0.8251940933672189]], [["TranslateX", 0.23949314158035084, 0.13576027004706692], ["ShearX", 0.8547563771688399, 0.8309262160483606]], [["Cutout", 0.4655680937486001, 0.2819807000622825], ["Contrast", 0.8439552665937905, 0.4843617871587037]], [["TranslateX", 0.19142454476784831, 0.7516148119169537], ["AutoContrast", 0.8677128351329768, 0.34967990912346336]], [["Contrast", 0.2997868299880966, 0.919508054854469], ["AutoContrast", 0.3003418493384957, 0.812314984368542]], [["Invert", 0.1070424236198183, 0.614674386498809], ["TranslateX", 0.5010973510899923, 0.20828478805259465]], [["Contrast", 0.6775882415611454, 0.6938564815591685], ["Cutout", 0.4814634264207498, 0.3086844939744179]], [["TranslateY", 0.939427105020265, 0.02531043619423201], ["Contrast", 0.793754257944812, 0.6676072472565451]], [["Sharpness", 0.09833672397575444, 0.5937214638292085], ["Rotate", 0.32530675291753763, 0.08302275740932441]], [["Sharpness", 0.3096455511562728, 0.6726732004553959], ["TranslateY", 0.43268997648796537, 0.8755012330217743]], [["ShearY", 0.9290771880324833, 0.22114736271319912], ["Equalize", 0.5520199288501478, 0.34269650332060553]], [["AutoContrast", 0.39763980746649374, 0.4597414582725454], ["Contrast", 0.941507852412761, 0.24991270562477041]], [["Contrast", 0.19419400547588095, 0.9127524785329233], ["Invert", 0.40544905179551727, 0.770081532844878]], [["Invert", 0.30473757368608334, 0.23534811781828846], ["Cutout", 0.26090722356706686, 0.5478390909877727]], [["Posterize", 0.49434361308057373, 0.05018423270527428], ["Color", 0.3041910676883317, 0.2603810415446437]], [["Invert", 0.5149061746764011, 0.9507449210221298], ["TranslateY", 0.4458076521892904, 0.8235358255774426]], [["Cutout", 0.7900006753351625, 0.905578861382507], ["Cutout", 0.6707153655762056, 0.8236715672258502]], [["Solarize", 0.8750534386579575, 0.10337670467100568], ["Posterize", 0.6102379615481381, 0.9264503915416868]], [["ShearY", 0.08448689377082852, 0.13981233725811626], ["TranslateX", 0.13979689669329498, 0.768774869872818]], [["TranslateY", 0.35752572266759985, 0.22827299847812488], ["Solarize", 0.3906957174236011, 0.5663314388307709]], [["ShearY", 0.29155240367061563, 0.8427516352971683], ["ShearX", 0.988825367441916, 0.9371258864857649]], [["Posterize", 0.3470780859769458, 0.5467686612321239], ["Rotate", 0.5758606274160093, 
0.8843838082656007]], [["Cutout", 0.07825368363221841, 0.3230799425855425], ["Equalize", 0.2319163865298529, 0.42133965674727325]], [["Invert", 0.41972172597448654, 0.34618622513582953], ["ShearX", 0.33638469398198834, 0.9098575535928108]], [["Invert", 0.7322652233340448, 0.7747502957687412], ["Cutout", 0.9643121397298106, 0.7983335094634907]], [["TranslateY", 0.30039942808098496, 0.229018798182827], ["TranslateY", 0.27009499739380194, 0.6435577237846236]], [["Color", 0.38245274994070644, 0.7030758568461645], ["ShearX", 0.4429321461666281, 0.6963787864044149]], [["AutoContrast", 0.8432798685515605, 0.5775214369578088], ["Brightness", 0.7140899735355927, 0.8545854720117658]], [["Rotate", 0.14418935535613786, 0.5637968282213426], ["Color", 0.7115231912479835, 0.32584796564566776]], [["Sharpness", 0.4023501062807533, 0.4162097130412771], ["Brightness", 0.5536372686153666, 0.03004023273348777]], [["TranslateX", 0.7526053265574295, 0.5365938133399961], ["Cutout", 0.07914142706557492, 0.7544953091603148]], [["TranslateY", 0.6932934644882822, 0.5302211727137424], ["Invert", 0.5040606028391255, 0.6074863635108957]], [["Sharpness", 0.5013938602431629, 0.9572417724333157], ["TranslateY", 0.9160516359783026, 0.41798927975391675]], [["ShearY", 0.5130018836722556, 0.30209438428424185], ["Color", 0.15017170588500262, 0.20653495360587826]], [["TranslateX", 0.5293300090022314, 0.6407011888285266], ["Rotate", 0.4809817860439001, 0.3537850070371702]], [["Equalize", 0.42243081336551014, 0.13472721311046565], ["Posterize", 0.4700309639484068, 0.5197704360874883]], [["AutoContrast", 0.40674959899687235, 0.7312824868168921], ["TranslateX", 0.7397527975920833, 0.7068339877944815]], [["TranslateY", 0.5880995184787206, 0.41294111378078946], ["ShearX", 0.3181387627799316, 0.4810010147143413]], [["Color", 0.9898680233928507, 0.13241525577655167], ["Contrast", 0.9824932511238534, 0.5081145010853807]], [["Invert", 0.1591854062582687, 0.9760371953250404], ["Color", 0.9913399302056851, 0.8388709501056177]], [["Rotate", 0.6427451962231163, 0.9486793975292853], ["AutoContrast", 0.8501937877930463, 0.021326757974406196]], [["Contrast", 0.13611684531087598, 0.3050858709483848], ["Posterize", 0.06618644756084646, 0.8776928511951034]], [["TranslateX", 0.41021065663839407, 0.4965319749091702], ["Rotate", 0.07088831484595115, 0.4435516708223345]], [["Sharpness", 0.3151707977154323, 0.28275482520179296], ["Invert", 0.36980384682133804, 0.20813616084536624]], [["Cutout", 0.9979060206661017, 0.39712948644725854], ["Brightness", 0.42451052896163466, 0.942623075649937]], [["Equalize", 0.5300853308425644, 0.010183500830128867], ["AutoContrast", 0.06930788523716991, 0.5403125318991522]], [["Contrast", 0.010385458959237814, 0.2588311035539086], ["ShearY", 0.9347048553928764, 0.10439028366854963]], [["ShearY", 0.9867649486508592, 0.8409258132716434], ["ShearX", 0.48031199530836444, 0.7703375364614137]], [["ShearY", 0.04835889473136512, 0.2671081675890492], ["Brightness", 0.7856432618509617, 0.8032169570159564]], [["Posterize", 0.11112884927351185, 0.7116956530752987], ["TranslateY", 0.7339151092128607, 0.3331241226029017]], [["Invert", 0.13527036207875454, 0.8425980515358883], ["Color", 0.7836395778298139, 0.5517059252678862]], [["Sharpness", 0.012541163521491816, 0.013197550692292892], ["Invert", 0.6295957932861318, 0.43276521236056054]], [["AutoContrast", 0.7681480991225756, 0.3634284648496289], ["Brightness", 0.09708289828517969, 0.45016725043529726]], [["Brightness", 0.5839450499487329, 0.47525965678316795], ["Posterize", 
0.43096581990183735, 0.9332382960125196]], [["Contrast", 0.9725334964552795, 0.9142902966863341], ["Contrast", 0.12376116410622995, 0.4355916974126801]], [["TranslateX", 0.8572708473690132, 0.02544522678265526], ["Sharpness", 0.37902120723460364, 0.9606092969833118]], [["TranslateY", 0.8907359001296927, 0.8011363927236099], ["Color", 0.7693777154407178, 0.0936768686746503]], [["Equalize", 0.0002657688243309364, 0.08190798535970034], ["Rotate", 0.5215478065240905, 0.5773519995038368]], [["TranslateY", 0.3383007813932477, 0.5733428274739165], ["Sharpness", 0.2436110797174722, 0.4757790814590501]], [["Cutout", 0.0957402176213592, 0.8914395928996034], ["Cutout", 0.4959915628586883, 0.25890349461645246]], [["AutoContrast", 0.594787300189186, 0.9627455357634459], ["ShearY", 0.5136027621132064, 0.10419602450259002]], [["Solarize", 0.4684077211553732, 0.6592850629431414], ["Sharpness", 0.2382385935956325, 0.6589291408243176]], [["Cutout", 0.4478786947325877, 0.6893616643143388], ["TranslateX", 0.2761781720270474, 0.21750622627277727]], [["Sharpness", 0.39476077929016484, 0.930902796668923], ["Cutout", 0.9073012208742808, 0.9881122386614257]], [["TranslateY", 0.0933719180021565, 0.7206252503441172], ["ShearX", 0.5151400441789256, 0.6307540083648309]], [["AutoContrast", 0.7772689258806401, 0.8159317013156503], ["AutoContrast", 0.5932793713915097, 0.05262217353927168]], [["Equalize", 0.38017352056118914, 0.8084724050448412], ["ShearY", 0.7239725628380852, 0.4246314890359326]], [["Cutout", 0.741157483503503, 0.13244380646497977], ["Invert", 0.03395378056675935, 0.7140036618098844]], [["Rotate", 0.0662727247460636, 0.7099861732415447], ["Rotate", 0.3168532707508249, 0.3553167425022127]], [["AutoContrast", 0.7429303516734129, 0.07117444599776435], ["Posterize", 0.5379537435918104, 0.807221330263993]], [["TranslateY", 0.9788586874795164, 0.7967243851346594], ["Invert", 0.4479103376922362, 0.04260360776727545]], [["Cutout", 0.28318121763188997, 0.7748680701406292], ["AutoContrast", 0.9109258369403016, 0.17126397858002085]], [["Color", 0.30183727885272027, 0.46718354750112456], ["TranslateX", 0.9628952256033627, 0.10269543754135535]], [["AutoContrast", 0.6316709389784041, 0.84287698792044], ["Brightness", 0.5544761629904337, 0.025264772745200004]], [["Rotate", 0.08803313299532567, 0.306059720523696], ["Invert", 0.5222165872425064, 0.045935208620454304]], [["TranslateY", 0.21912346831923835, 0.48529224559004436], ["TranslateY", 0.15466734731903942, 0.8929485418495068]], [["ShearX", 0.17141022847016563, 0.8607600402165531], ["ShearX", 0.6890511341106859, 0.7540899265679949]], [["Invert", 0.9417455522972059, 0.9021733684991224], ["Solarize", 0.7693107057723746, 0.7268007946568782]], [["Posterize", 0.02376991543373752, 0.6768442864453844], ["Rotate", 0.7736875065112697, 0.6706331753139825]], [["Contrast", 0.3623841610390669, 0.15023657344457686], ["Equalize", 0.32975472189318666, 0.05629246869510651]], [["Sharpness", 0.7874882420165824, 0.49535778020457066], ["Posterize", 0.09485578893387558, 0.6170768580482466]], [["Brightness", 0.7099280202949585, 0.021523012961427335], ["Posterize", 0.2076371467666719, 0.17168118578815206]], [["Color", 0.8546367645761538, 0.832011891505731], ["Equalize", 0.6429734783051777, 0.2618995960561532]], [["Rotate", 0.8780793721476224, 0.5920897827664297], ["ShearX", 0.5338303685064825, 0.8605424531336439]], [["Sharpness", 0.7504493806631884, 0.9723552387375258], ["Sharpness", 0.3206385634203266, 0.45127845905824693]], [["ShearX", 0.23794709526711355, 0.06257530645720066], 
["Solarize", 0.9132374030587093, 0.6240819934824045]], [["Sharpness", 0.790583587969259, 0.28551171786655405], ["Contrast", 0.39872982844590554, 0.09644706751019538]], [["Equalize", 0.30681999237432944, 0.5645045018157916], ["Posterize", 0.525966242669736, 0.7360106111256014]], [["TranslateX", 0.4881014179825114, 0.6317220208872226], ["ShearX", 0.2935158995550958, 0.23104608987381758]], [["Rotate", 0.49977116738568395, 0.6610761068306319], ["TranslateY", 0.7396566602715687, 0.09386747830045217]], [["ShearY", 0.5909773790018789, 0.16229529902832718], ["Equalize", 0.06461394468918358, 0.6661349001143908]], [["TranslateX", 0.7218443721851834, 0.04435720302810153], ["Cutout", 0.986686540951642, 0.734771197038724]], [["ShearX", 0.5353800096911666, 0.8120139502148365], ["Equalize", 0.4613239578449774, 0.5159528929124512]], [["Color", 0.0871713897628631, 0.7708895183198486], ["Solarize", 0.5811386808912219, 0.35260648120785887]], [["Posterize", 0.3910857927477053, 0.4329219555775561], ["Color", 0.9115983668789468, 0.6043069944145293]], [["Posterize", 0.07493067637060635, 0.4258000066006725], ["AutoContrast", 0.4740957581389772, 0.49069587151651295]], [["Rotate", 0.34086200894268937, 0.9812149332288828], ["Solarize", 0.6801012471371733, 0.17271491146753837]], [["Color", 0.20542270872895207, 0.5532087457727624], ["Contrast", 0.2718692536563381, 0.20313287569510108]], [["Equalize", 0.05199827210980934, 0.0832859890912212], ["AutoContrast", 0.8092395764794107, 0.7778945136511004]], [["Sharpness", 0.1907689513066838, 0.7705754572256907], ["Color", 0.3911178658498049, 0.41791326933095485]], [["Solarize", 0.19611855804748257, 0.2407807485604081], ["AutoContrast", 0.5343964972940493, 0.9034209455548394]], [["Color", 0.43586520148538865, 0.4711164626521439], ["ShearY", 0.28635408186820555, 0.8417816793020271]], [["Cutout", 0.09818482420382535, 0.1649767430954796], ["Cutout", 0.34582392911178494, 0.3927982995799828]], [["ShearX", 0.001253882705272269, 0.48661629027584596], ["Solarize", 0.9229221435457137, 0.44374894836659073]], [["Contrast", 0.6829734655718668, 0.8201750485099037], ["Cutout", 0.7886756837648936, 0.8423285219631946]], [["TranslateY", 0.857017093561528, 0.3038537151773969], ["Invert", 0.12809228606383538, 0.23637166191748027]], [["Solarize", 0.9829027723424164, 0.9723093910674763], ["Color", 0.6346495302126811, 0.5405494753107188]], [["AutoContrast", 0.06868643520377715, 0.23758659417688077], ["AutoContrast", 0.6648225411500879, 0.5618315648260103]], [["Invert", 0.44202305603311676, 0.9945938909685547], ["Equalize", 0.7991650497684454, 0.16014142656347097]], [["AutoContrast", 0.8778631604769588, 0.03951977631894088], ["ShearY", 0.8495160088963707, 0.35771447321250416]], [["Color", 0.5365078341001592, 0.21102444169782308], ["ShearX", 0.7168869678248874, 0.3904298719872734]], [["TranslateX", 0.6517203786101899, 0.6467598990650437], ["Invert", 0.26552491504364517, 0.1210812827294625]], [["Posterize", 0.35196021684368994, 0.8420648319941891], ["Invert", 0.7796829363930631, 0.9520895999240896]], [["Sharpness", 0.7391572148971984, 0.4853940393452846], ["TranslateX", 0.7641915295592839, 0.6351349057666782]], [["Posterize", 0.18485880221115913, 0.6117603277356728], ["Rotate", 0.6541660490605724, 0.5704041108375348]], [["TranslateY", 0.27517423188070533, 0.6610080904072458], ["Contrast", 0.6091250547289317, 0.7702443247557892]], [["Equalize", 0.3611798581067118, 0.6623615672642768], ["TranslateX", 0.9537265090885917, 0.06352772509358584]], [["ShearX", 0.09720029389103535, 0.7800423126320308], 
["Invert", 0.30314352455858884, 0.8519925470889914]], [["Brightness", 0.06931529763458055, 0.57760829499712], ["Cutout", 0.637251974467394, 0.7184346129191052]], [["AutoContrast", 0.5026722100286064, 0.32025257156541886], ["Contrast", 0.9667478703047919, 0.14178519432669368]], [["Equalize", 0.5924463845816984, 0.7187610262181517], ["TranslateY", 0.7059479079159405, 0.06551471830655187]], [["Sharpness", 0.18161164512332928, 0.7576138481173385], ["Brightness", 0.19191138767695282, 0.7865880269424701]], [["Brightness", 0.36780861866078696, 0.0677855546737901], ["AutoContrast", 0.8491446654142264, 0.09217782099938121]], [["TranslateY", 0.06011399855120858, 0.8374487034710264], ["TranslateY", 0.8373922962070498, 0.1991295720254297]], [["Posterize", 0.702559916122481, 0.30257509683007755], ["Rotate", 0.249899495398891, 0.9370437251176267]], [["ShearX", 0.9237874098232075, 0.26241907483351146], ["Brightness", 0.7221766836146657, 0.6880749752986671]], [["Cutout", 0.37994098189193104, 0.7836874473657957], ["ShearX", 0.9212861960976824, 0.8140948561570449]], [["Posterize", 0.2584098274786417, 0.7990847652004848], ["Invert", 0.6357731737590063, 0.1066304859116326]], [["Sharpness", 0.4412790857539922, 0.9692465283229825], ["Color", 0.9857401617339051, 0.26755393929808713]], [["Equalize", 0.22348671644912665, 0.7370019910830038], ["Posterize", 0.5396106339575417, 0.5559536849843303]], [["Equalize", 0.8742967663495852, 0.2797122599926307], ["Rotate", 0.4697322053105951, 0.8769872942579476]], [["Sharpness", 0.44279911640509206, 0.07729581896071613], ["Cutout", 0.3589177366154631, 0.2704031551235969]], [["TranslateX", 0.614216412574085, 0.47929659784170453], ["Brightness", 0.6686234118438007, 0.05700784068205689]], [["ShearY", 0.17920614630857634, 0.4699685075827862], ["Color", 0.38251870810870003, 0.7262706923005887]], [["Solarize", 0.4951799001144561, 0.212775278026479], ["TranslateX", 0.8666105646463097, 0.6750496637519537]], [["Color", 0.8110864170849051, 0.5154263861958484], ["Sharpness", 0.2489044083898776, 0.3763372541462343]], [["Cutout", 0.04888193613483871, 0.06041664638981603], ["Color", 0.06438587718683708, 0.5797881428892969]], [["Rotate", 0.032427448352152166, 0.4445797818376559], ["Posterize", 0.4459357828482998, 0.5879865187630777]], [["ShearX", 0.1617179557693058, 0.050796802246318884], ["Cutout", 0.8142465452060423, 0.3836391305618707]], [["TranslateY", 0.1806857249209416, 0.36697730355422675], ["Rotate", 0.9897576550818276, 0.7483432452225264]], [["Brightness", 0.18278016458098223, 0.952352527690299], ["Cutout", 0.3269735224453044, 0.3924869905012752]], [["ShearX", 0.870832707718742, 0.3214743207190739], ["Cutout", 0.6805560681792573, 0.6984188155282459]], [["TranslateX", 0.4157118388833776, 0.3964216288135384], ["TranslateX", 0.3253012682285006, 0.624835513104391]], [["Contrast", 0.7678168037628158, 0.31033802162621793], ["ShearX", 0.27022424855977134, 0.3773245605126201]], [["TranslateX", 0.37812621869017593, 0.7657993810740699], ["Rotate", 0.18081890120092914, 0.8893511219618171]], [["Posterize", 0.8735859716088367, 0.18243793043074286], ["TranslateX", 0.90435994250313, 0.24116383818819453]], [["Invert", 0.06666709253664793, 0.3881076083593933], ["TranslateX", 0.3783333964963522, 0.14411014979589543]], [["Equalize", 0.8741147867162096, 0.14203839235846816], ["TranslateX", 0.7801536758037405, 0.6952401607812743]], [["Cutout", 0.6095335117944475, 0.5679026063718094], ["Posterize", 0.06433868172233115, 0.07139559616012303]], [["TranslateY", 0.3020364047315408, 0.21459810361176246], 
["Cutout", 0.7097677414888889, 0.2942144632587549]], [["Brightness", 0.8223662419048653, 0.195700694016108], ["Invert", 0.09345407040803999, 0.779843655582099]], [["TranslateY", 0.7353462929356228, 0.0468520680237382], ["Cutout", 0.36530918247940425, 0.3897292909049672]], [["Invert", 0.9676896451721213, 0.24473302189463453], ["Invert", 0.7369271521408992, 0.8193267003356975]], [["Sharpness", 0.8691871972054326, 0.4441713912682772], ["ShearY", 0.47385584832119887, 0.23521684584675429]], [["ShearY", 0.9266946026184021, 0.7611986713358834], ["TranslateX", 0.6195820760253926, 0.14661428669483678]], [["Sharpness", 0.08470870576026868, 0.3380219099907229], ["TranslateX", 0.3062343307496658, 0.7135777338095889]], [["Sharpness", 0.5246448204194909, 0.3193061215236702], ["ShearX", 0.8160637208508432, 0.9720697396582731]], [["Posterize", 0.5249259956549405, 0.3492042382504774], ["Invert", 0.8183138799547441, 0.11107271762524618]], [["TranslateY", 0.210869733350744, 0.7138905840721885], ["Sharpness", 0.7773226404450125, 0.8005353621959782]], [["Posterize", 0.33067522385556025, 0.32046239220630124], ["AutoContrast", 0.18918147708798405, 0.4646281070474484]], [["TranslateX", 0.929502026131094, 0.8029128121556285], ["Invert", 0.7319794306118105, 0.5421878712623392]], [["ShearX", 0.25645940834182723, 0.42754710760160963], ["ShearX", 0.44640695310173306, 0.8132185532296811]], [["Color", 0.018436846416536312, 0.8439313862001113], ["Sharpness", 0.3722867661453415, 0.5103570873163251]], [["TranslateX", 0.7285989086776543, 0.4809027697099264], ["TranslateY", 0.9740807004893643, 0.8241085438636939]], [["Posterize", 0.8721868989693397, 0.5700907310383815], ["Posterize", 0.4219074410577852, 0.8032643572845402]], [["Contrast", 0.9811380092558266, 0.8498397471632105], ["Sharpness", 0.8380884329421594, 0.18351306571903125]], [["TranslateY", 0.3878939366762001, 0.4699103438753077], ["Invert", 0.6055556353233807, 0.8774727658400134]], [["TranslateY", 0.052317005261018346, 0.39471450378745787], ["ShearX", 0.8612486845942395, 0.28834103278807466]], [["Color", 0.511993351208063, 0.07251427040525904], ["Solarize", 0.9898097047354855, 0.299761565689576]], [["Equalize", 0.2721248231619904, 0.6870975927455507], ["Cutout", 0.8787327242363994, 0.06228061428917098]], [["Invert", 0.8931880335225408, 0.49720931867378193], ["Posterize", 0.9619698792159256, 0.17859639696940088]], [["Posterize", 0.0061688075074411985, 0.08082938731035938], ["Brightness", 0.27745128028826993, 0.8638528796903816]], [["ShearY", 0.9140200609222026, 0.8240421430867707], ["Invert", 0.651734417415332, 0.08871906369930926]], [["Color", 0.45585010413511196, 0.44705070078574316], ["Color", 0.26394624901633146, 0.11242877788650807]], [["ShearY", 0.9200278466372522, 0.2995901331149652], ["Cutout", 0.8445407215116278, 0.7410524214287446]], [["ShearY", 0.9950483746990132, 0.112964468262847], ["ShearY", 0.4118332303218585, 0.44839613407553636]], [["Contrast", 0.7905821952255192, 0.23360046159385106], ["Posterize", 0.8611787233956044, 0.8984260048943528]], [["TranslateY", 0.21448061359312853, 0.8228112806838331], ["Contrast", 0.8992297266152983, 0.9179231590570998]], [["Invert", 0.3924194798946006, 0.31830516468371495], ["Rotate", 0.8399556845248508, 0.3764892022932781]], [["Cutout", 0.7037916990046816, 0.9214620769502728], ["AutoContrast", 0.02913794613018239, 0.07808607528954048]], [["ShearY", 0.6041490474263381, 0.6094184590800105], ["Equalize", 0.2932954517354919, 0.5840888946081727]], [["ShearX", 0.6056801676269449, 0.6948580442549543], ["Cutout", 
0.3028001021044615, 0.15117101733894078]], [["Brightness", 0.8011486803860253, 0.18864079729374195], ["Solarize", 0.014965327213230961, 0.8842620292527029]], [["Invert", 0.902244007904273, 0.5634673798052033], ["Equalize", 0.13422913507398349, 0.4110956745883727]], [["TranslateY", 0.9981773319103838, 0.09568550987216096], ["Color", 0.7627662124105109, 0.8494409737419493]], [["Cutout", 0.3013527640416782, 0.03377226729898486], ["ShearX", 0.5727964831614619, 0.8784196638222834]], [["TranslateX", 0.6050722426803684, 0.3650103962378708], ["TranslateX", 0.8392084589130886, 0.6479816470292911]], [["Rotate", 0.5032806606500023, 0.09276980118866307], ["TranslateY", 0.7800234515261191, 0.18896454379343308]], [["Invert", 0.9266027256244017, 0.8246111062199752], ["Contrast", 0.12112023357797697, 0.33870762271759436]], [["Brightness", 0.8688784756993134, 0.17263759696106606], ["ShearX", 0.5133700431071326, 0.6686811994542494]], [["Invert", 0.8347840440941976, 0.03774897445901726], ["Brightness", 0.24925057499276548, 0.04293631677355758]], [["Color", 0.5998145279485104, 0.4820093200092529], ["TranslateY", 0.6709586184077769, 0.07377334081382858]], [["AutoContrast", 0.7898846202957984, 0.325293526672498], ["Contrast", 0.5156435596826767, 0.2889223168660645]], [["ShearX", 0.08147389674998307, 0.7978924681113669], ["Contrast", 0.7270003309106291, 0.009571215234092656]], [["Sharpness", 0.417607614440786, 0.9532566433338661], ["Posterize", 0.7186586546796782, 0.6936509907073302]], [["ShearX", 0.9555300215926675, 0.1399385550263872], ["Color", 0.9981041061848231, 0.5037462398323248]], [["Equalize", 0.8003487831375474, 0.5413759363796945], ["ShearY", 0.0026607045117773565, 0.019262273030984933]], [["TranslateY", 0.04845391502469176, 0.10063445212118283], ["Cutout", 0.8273170186786745, 0.5045257728554577]], [["TranslateX", 0.9690985344978033, 0.505202991815533], ["TranslateY", 0.7255326592928096, 0.02103609500701631]], [["Solarize", 0.4030771176836736, 0.8424237871457034], ["Cutout", 0.28705805963928965, 0.9601617893682582]], [["Sharpness", 0.16865290353070606, 0.6899673563468826], ["Posterize", 0.3985430034869616, 0.6540651997730774]], [["ShearY", 0.21395578485362032, 0.09519358818949009], ["Solarize", 0.6692821708524135, 0.6462523623552485]], [["AutoContrast", 0.912360598054091, 0.029800239085051583], ["Invert", 0.04319256403746308, 0.7712501517098587]], [["ShearY", 0.9081969961839055, 0.4581560239984739], ["AutoContrast", 0.5313894814729159, 0.5508393335751848]], [["ShearY", 0.860528568424097, 0.8196987216301588], ["Posterize", 0.41134650331494205, 0.3686632018978778]], [["AutoContrast", 0.8753670810078598, 0.3679438326304749], ["Invert", 0.010444228965415858, 0.9581244779208277]], [["Equalize", 0.07071836206680682, 0.7173594756186462], ["Brightness", 0.06111434312497388, 0.16175064669049277]], [["AutoContrast", 0.10522219073562122, 0.9768776621069855], ["TranslateY", 0.2744795945215529, 0.8577967957127298]], [["AutoContrast", 0.7628146493166175, 0.996157376418147], ["Contrast", 0.9255565598518469, 0.6826126662976868]], [["TranslateX", 0.017225816199011312, 0.2470332491402908], ["Solarize", 0.44048494909493807, 0.4492422515972162]], [["ShearY", 0.38885252627795064, 0.10272256704901939], ["Equalize", 0.686154959829183, 0.8973517148655337]], [["Rotate", 0.29628991573592967, 0.16639926575004715], ["ShearX", 0.9013782324726413, 0.0838318162771563]], [["Color", 0.04968391374688563, 0.6138600739645352], ["Invert", 0.11177127838716283, 0.10650198522261578]], [["Invert", 0.49655016367624016, 0.8603374164829688], 
["ShearY", 0.40625439617553727, 0.4516437918820778]], [["TranslateX", 0.15015718916062992, 0.13867777502116208], ["Brightness", 0.3374464418810188, 0.7613355669536931]], [["Invert", 0.644644393321966, 0.19005804481199562], ["AutoContrast", 0.2293259789431853, 0.30335723256340186]], [["Solarize", 0.004968793254801596, 0.5370892072646645], ["Contrast", 0.9136902637865596, 0.9510587477779084]], [["Rotate", 0.38991518440867123, 0.24796987467455756], ["Sharpness", 0.9911180315669776, 0.5265657122981591]], [["Solarize", 0.3919646484436238, 0.6814994037194909], ["Sharpness", 0.4920838987787103, 0.023425724294012018]], [["TranslateX", 0.25107587874378867, 0.5414936560189212], ["Cutout", 0.7932919623814599, 0.9891303444820169]], [["Brightness", 0.07863012174272999, 0.045175652208389594], ["Solarize", 0.889609658064552, 0.8228793315963948]], [["Cutout", 0.20477096178169596, 0.6535063675027364], ["ShearX", 0.9216318577173639, 0.2908690977359947]], [["Contrast", 0.7035118947423187, 0.45982709058312454], ["Contrast", 0.7130268070749464, 0.8635123354235471]], [["Sharpness", 0.26319477541228997, 0.7451278726847078], ["Rotate", 0.8170499362173754, 0.13998593411788207]], [["Rotate", 0.8699365715164192, 0.8878057721750832], ["Equalize", 0.06682350555715044, 0.7164702080630689]], [["ShearY", 0.3137466057521987, 0.6747433496011368], ["Rotate", 0.42118828936218133, 0.980121180104441]], [["Solarize", 0.8470375049950615, 0.15287589264139223], ["Cutout", 0.14438435054693055, 0.24296463267973512]], [["TranslateY", 0.08822241792224905, 0.36163911974799356], ["TranslateY", 0.11729726813270003, 0.6230889726445291]], [["ShearX", 0.7720112337718541, 0.2773292905760122], ["Sharpness", 0.756290929398613, 0.27830353710507705]], [["Color", 0.33825031007968287, 0.4657590047522816], ["ShearY", 0.3566628994713067, 0.859750504071925]], [["TranslateY", 0.06830147433378053, 0.9348778582086664], ["TranslateX", 0.15509346516378553, 0.26320778885339435]], [["Posterize", 0.20266751150740858, 0.008351463842578233], ["Sharpness", 0.06506971109417259, 0.7294471760284555]], [["TranslateY", 0.6278911394418829, 0.8702181892620695], ["Invert", 0.9367073860264247, 0.9219230428944211]], [["Sharpness", 0.1553425337673321, 0.17601557714491345], ["Solarize", 0.7040449681338888, 0.08764313147327729]], [["Equalize", 0.6082233904624664, 0.4177428549911376], ["AutoContrast", 0.04987405274618151, 0.34516208204700916]], [["Brightness", 0.9616085936167699, 0.14561237331885468], ["Solarize", 0.8927707736296572, 0.31176907850205704]], [["Brightness", 0.6707778304730988, 0.9046457117525516], ["Brightness", 0.6801448953060988, 0.20015313057149042]], [["Color", 0.8292680845499386, 0.5181603879593888], ["Brightness", 0.08549161770369762, 0.6567870536463203]], [["ShearY", 0.267802208078051, 0.8388133819588173], ["Sharpness", 0.13453409120796123, 0.10028351311149486]], [["Posterize", 0.775796593610272, 0.05359034561289766], ["Cutout", 0.5067360625733027, 0.054451986840317934]], [["TranslateX", 0.5845238647690084, 0.7507147553486293], ["Brightness", 0.2642051786121197, 0.2578358927056452]], [["Cutout", 0.10787517610922692, 0.8147986902794228], ["Contrast", 0.2190149206329539, 0.902210615462459]], [["TranslateX", 0.5663614214181296, 0.05309965916414028], ["ShearX", 0.9682797885154938, 0.41791929533938466]], [["ShearX", 0.2345325577621098, 0.383780128037189], ["TranslateX", 0.7298083748149163, 0.644325797667087]], [["Posterize", 0.5138725709682734, 0.7901809917259563], ["AutoContrast", 0.7966018627776853, 0.14529337543427345]], [["Invert", 0.5973031989249785, 
0.417399314592829], ["Solarize", 0.9147539948653116, 0.8221272315548086]], [["Posterize", 0.601596043336383, 0.18969646160963938], ["Color", 0.7527275484079655, 0.431793831326888]], [["Equalize", 0.6731483454430538, 0.7866786558207602], ["TranslateX", 0.97574396899191, 0.5970255778044692]], [["Cutout", 0.15919495850169718, 0.8916094305850562], ["Invert", 0.8351348834751027, 0.4029937360314928]], [["Invert", 0.5894085405226027, 0.7283806854157764], ["Brightness", 0.3973976860470554, 0.949681121498567]], [["AutoContrast", 0.3707914135327408, 0.21192068592079616], ["ShearX", 0.28040127351140676, 0.6754553511344856]], [["Solarize", 0.07955132378694896, 0.15073572961927306], ["ShearY", 0.5735850168851625, 0.27147326850217746]], [["Equalize", 0.678653949549764, 0.8097796067861455], ["Contrast", 0.2283048527510083, 0.15507804874474185]], [["Equalize", 0.286013868374536, 0.186785848694501], ["Posterize", 0.16319021740810458, 0.1201304443285659]], [["Sharpness", 0.9601590830563757, 0.06267915026513238], ["AutoContrast", 0.3813920685124327, 0.294224403296912]], [["Brightness", 0.2703246632402241, 0.9168405377492277], ["ShearX", 0.6156009855831097, 0.4955986055846403]], [["Color", 0.9065504424987322, 0.03393612216080133], ["ShearY", 0.6768595880405884, 0.9981068127818191]], [["Equalize", 0.28812842368483904, 0.300387487349145], ["ShearY", 0.28812248704858345, 0.27105076231533964]], [["Brightness", 0.6864882730513477, 0.8205553299102412], ["Cutout", 0.45995236371265424, 0.5422030370297759]], [["Color", 0.34941404877084326, 0.25857961830158516], ["AutoContrast", 0.3451390878441899, 0.5000938249040454]], [["Invert", 0.8268247541815854, 0.6691380821226468], ["Cutout", 0.46489193601530476, 0.22620873109485895]], [["Rotate", 0.17879730528062376, 0.22670425330593935], ["Sharpness", 0.8692795688221834, 0.36586055020855723]], [["Brightness", 0.31203975139659634, 0.6934046293010939], ["Cutout", 0.31649437872271236, 0.08078625004157935]], [["Cutout", 0.3119482836150119, 0.6397160035509996], ["Contrast", 0.8311248624784223, 0.22897510169718616]], [["TranslateX", 0.7631157841429582, 0.6482890521284557], ["Brightness", 0.12681196272427664, 0.3669813784257344]], [["TranslateX", 0.06027722649179801, 0.3101104512201861], ["Sharpness", 0.5652076706249394, 0.05210008400968136]], [["AutoContrast", 0.39213552101583127, 0.5047021194355596], ["ShearY", 0.7164003055682187, 0.8063370761002899]], [["Solarize", 0.9574307011238342, 0.21472064809226854], ["AutoContrast", 0.8102612285047174, 0.716870148067014]], [["Rotate", 0.3592634277567387, 0.6452602893051465], ["AutoContrast", 0.27188430331411506, 0.06003099168464854]], [["Cutout", 0.9529536554825503, 0.5285505311027461], ["Solarize", 0.08478231903311029, 0.15986449762728216]], [["TranslateY", 0.31176130458018936, 0.5642853506158253], ["Equalize", 0.008890883901317648, 0.5146121040955942]], [["Color", 0.40773645085566157, 0.7110398926612682], ["Color", 0.18233100156439364, 0.7830036002758337]], [["Posterize", 0.5793809197821732, 0.043748553135581236], ["Invert", 0.4479962016131668, 0.7349663010359488]], [["TranslateX", 0.1994882312299382, 0.05216859488899439], ["Rotate", 0.48288726352035416, 0.44713829026777585]], [["Posterize", 0.22122838185154603, 0.5034546841241283], ["TranslateX", 0.2538745835410222, 0.6129055170893385]], [["Color", 0.6786559960640814, 0.4529749369803212], ["Equalize", 0.30215879674415336, 0.8733394611096772]], [["Contrast", 0.47316062430673456, 0.46669538897311447], ["Invert", 0.6514906551984854, 0.3053339444067804]], [["Equalize", 0.6443202625334524, 
0.8689731394616441], ["Color", 0.7549183794057628, 0.8889001426329578]], [["Solarize", 0.616709740662654, 0.7792180816399313], ["ShearX", 0.9659155537406062, 0.39436937531179495]], [["Equalize", 0.23694011299406226, 0.027711152164392128], ["TranslateY", 0.1677339686527083, 0.3482126536808231]], [["Solarize", 0.15234175951790285, 0.7893840414281341], ["TranslateX", 0.2396395768284183, 0.27727219214979715]], [["Contrast", 0.3792017455380605, 0.32323660409845334], ["Contrast", 0.1356037413846466, 0.9127772969992305]], [["ShearX", 0.02642732222284716, 0.9184662576502115], ["Equalize", 0.11504884472142995, 0.8957638893097964]], [["TranslateY", 0.3193812913345325, 0.8828100030493128], ["ShearY", 0.9374975727563528, 0.09909415611083694]], [["AutoContrast", 0.025840721736048122, 0.7941037581373024], ["TranslateY", 0.498518003323313, 0.5777122846572548]], [["ShearY", 0.6042199307830248, 0.44809668754508836], ["Cutout", 0.3243978207701482, 0.9379740926294765]], [["ShearY", 0.6858549297583574, 0.9993252035788924], ["Sharpness", 0.04682428732773203, 0.21698099707915652]], [["ShearY", 0.7737469436637263, 0.8810127181224531], ["ShearY", 0.8995655445246451, 0.4312416220354539]], [["TranslateY", 0.4953094136709374, 0.8144161580138571], ["Solarize", 0.26301211718928097, 0.518345311180405]], [["Brightness", 0.8820246486031275, 0.571075863786249], ["ShearX", 0.8586669146703955, 0.0060476383595142735]], [["Sharpness", 0.20519233710982254, 0.6144574759149729], ["Posterize", 0.07976625267460813, 0.7480145046726968]], [["ShearY", 0.374075419680195, 0.3386105402023202], ["ShearX", 0.8228083637082115, 0.5885174783155361]], [["Brightness", 0.3528780713814561, 0.6999884884306623], ["Sharpness", 0.3680348120526238, 0.16953358258959617]], [["Brightness", 0.24891223104442084, 0.7973853494920095], ["TranslateX", 0.004256803835524736, 0.0470216343108546]], [["Posterize", 0.1947344282646012, 0.7694802711054367], ["Cutout", 0.9594385534844785, 0.5469744140592429]], [["Invert", 0.19012504762806026, 0.7816140211434693], ["TranslateY", 0.17479746932338402, 0.024249345245078602]], [["Rotate", 0.9669262055946796, 0.510166180775991], ["TranslateX", 0.8990602034610352, 0.6657802719304693]], [["ShearY", 0.5453049050407278, 0.8476872739603525], ["Cutout", 0.14226529093962592, 0.15756960661106634]], [["Equalize", 0.5895291156113004, 0.6797218994447763], ["TranslateY", 0.3541442192192753, 0.05166001155849864]], [["Equalize", 0.39530681662726097, 0.8448335365081087], ["Brightness", 0.6785483272734143, 0.8805568647038574]], [["Cutout", 0.28633258271917905, 0.7750870268336066], ["Equalize", 0.7221097824537182, 0.5865506280531162]], [["Posterize", 0.9044429629421187, 0.4620266401793388], ["Invert", 0.1803008045494473, 0.8073190766288534]], [["Sharpness", 0.7054649148075851, 0.3877207948962055], ["TranslateX", 0.49260224225927285, 0.8987462620731029]], [["Sharpness", 0.11196934729294483, 0.5953704422694938], ["Contrast", 0.13969334315069737, 0.19310569898434204]], [["Posterize", 0.5484346101051778, 0.7914140118600685], ["Brightness", 0.6428044691630473, 0.18811316670808076]], [["Invert", 0.22294834094984717, 0.05173157689962704], ["Cutout", 0.6091129168510456, 0.6280845506243643]], [["AutoContrast", 0.5726444076195267, 0.2799840903601295], ["Cutout", 0.3055752727786235, 0.591639807512993]], [["Brightness", 0.3707116723204462, 0.4049175910826627], ["Rotate", 0.4811601625588309, 0.2710760253723644]], [["ShearY", 0.627791719653608, 0.6877498291550205], ["TranslateX", 0.8751753308366824, 0.011164650018719358]], [["Posterize", 
0.33832547954522263, 0.7087039872581657], ["Posterize", 0.6247474435007484, 0.7707784192114796]], [["Contrast", 0.17620186308493468, 0.9946224854942095], ["Solarize", 0.5431896088395964, 0.5867904203742308]], [["ShearX", 0.4667959516719652, 0.8938082224109446], ["TranslateY", 0.7311343008292865, 0.6829842246020277]], [["ShearX", 0.6130281467237769, 0.9924010909612302], ["Brightness", 0.41039241699696916, 0.9753218875311392]], [["TranslateY", 0.0747250386427123, 0.34602725521067534], ["Rotate", 0.5902597465515901, 0.361094672021087]], [["Invert", 0.05234890878959486, 0.36914978664919407], ["Sharpness", 0.42140532878231374, 0.19204058551048275]], [["ShearY", 0.11590485361909497, 0.6518540857972316], ["Invert", 0.6482444740361704, 0.48256237896163945]], [["Rotate", 0.4931329446923608, 0.037076242417301675], ["Contrast", 0.9097939772412852, 0.5619594905306389]], [["Posterize", 0.7311032479626216, 0.4796364593912915], ["Color", 0.13912123993932402, 0.03997286439663705]], [["AutoContrast", 0.6196602944085344, 0.2531430457527588], ["Rotate", 0.5583937060431972, 0.9893379795224023]], [["AutoContrast", 0.8847753125072959, 0.19123028952580057], ["TranslateY", 0.494361716097206, 0.14232297727461696]], [["Invert", 0.6212360716340707, 0.033898871473033165], ["AutoContrast", 0.30839896957008295, 0.23603569542166247]], [["Equalize", 0.8255583546605049, 0.613736933157845], ["AutoContrast", 0.6357166629525485, 0.7894617347709095]], [["Brightness", 0.33840706322846814, 0.07917167871493658], ["ShearY", 0.15693175752528676, 0.6282773652129153]], [["Cutout", 0.7550520024859294, 0.08982367300605598], ["ShearX", 0.5844942417320858, 0.36051195083380105]]] + return p + + +def fa_reduced_imagenet(): + p = [[["ShearY", 0.14143816458479197, 0.513124791615952], ["Sharpness", 0.9290316227291179, 0.9788406212603302]], [["Color", 0.21502874228385338, 0.3698477943880306], ["TranslateY", 0.49865058747734736, 0.4352676987103321]], [["Brightness", 0.6603452126485386, 0.6990174510500261], ["Cutout", 0.7742953773992511, 0.8362550883640804]], [["Posterize", 0.5188375788270497, 0.9863648925446865], ["TranslateY", 0.8365230108655313, 0.6000972236440252]], [["ShearY", 0.9714994964711299, 0.2563663552809896], ["Equalize", 0.8987567223581153, 0.1181761775609772]], [["Sharpness", 0.14346409304565366, 0.5342189791746006], ["Sharpness", 0.1219714162835897, 0.44746801278319975]], [["TranslateX", 0.08089260772173967, 0.028011721602479833], ["TranslateX", 0.34767877352421406, 0.45131294688688794]], [["Brightness", 0.9191164585327378, 0.5143232242627864], ["Color", 0.9235247849934283, 0.30604586249462173]], [["Contrast", 0.4584173187505879, 0.40314219914942756], ["Rotate", 0.550289356406774, 0.38419022293237126]], [["Posterize", 0.37046156420799325, 0.052693291117634544], ["Cutout", 0.7597581409366909, 0.7535799791937421]], [["Color", 0.42583964114658746, 0.6776641859552079], ["ShearY", 0.2864805671096011, 0.07580175477739545]], [["Brightness", 0.5065952125552232, 0.5508640233704984], ["Brightness", 0.4760021616081475, 0.3544313318097987]], [["Posterize", 0.5169630851995185, 0.9466018906715961], ["Posterize", 0.5390336503396841, 0.1171015788193209]], [["Posterize", 0.41153170909576176, 0.7213063942615204], ["Rotate", 0.6232230424824348, 0.7291984098675746]], [["Color", 0.06704687234714028, 0.5278429246040438], ["Sharpness", 0.9146652195810183, 0.4581415618941407]], [["ShearX", 0.22404644446773492, 0.6508620171913467], ["Brightness", 0.06421961538672451, 0.06859528721039095]], [["Rotate", 0.29864103693134797, 0.5244313199644495], 
["Sharpness", 0.4006161706584276, 0.5203708477368657]], [["AutoContrast", 0.5748186910788027, 0.8185482599354216], ["Posterize", 0.9571441684265188, 0.1921474117448481]], [["ShearY", 0.5214786760436251, 0.8375629059785009], ["Invert", 0.6872393349333636, 0.9307694335024579]], [["Contrast", 0.47219838080793364, 0.8228524484275648], ["TranslateY", 0.7435518856840543, 0.5888865560614439]], [["Posterize", 0.10773482839638836, 0.6597021018893648], ["Contrast", 0.5218466423129691, 0.562985661685268]], [["Rotate", 0.4401753067886466, 0.055198255925702475], ["Rotate", 0.3702153509335602, 0.5821574425474759]], [["TranslateY", 0.6714729117832363, 0.7145542887432927], ["Equalize", 0.0023263758097700205, 0.25837341854887885]], [["Cutout", 0.3159707561240235, 0.19539664199170742], ["TranslateY", 0.8702824829864558, 0.5832348977243467]], [["AutoContrast", 0.24800812729140026, 0.08017301277245716], ["Brightness", 0.5775505849482201, 0.4905904775616114]], [["Color", 0.4143517886294533, 0.8445937742921498], ["ShearY", 0.28688910858536587, 0.17539366839474402]], [["Brightness", 0.6341134194059947, 0.43683815933640435], ["Brightness", 0.3362277685899835, 0.4612826163288225]], [["Sharpness", 0.4504035748829761, 0.6698294470467474], ["Posterize", 0.9610055612671645, 0.21070714173174876]], [["Posterize", 0.19490421920029832, 0.7235798208354267], ["Rotate", 0.8675551331308305, 0.46335565746433094]], [["Color", 0.35097958351003306, 0.42199181561523186], ["Invert", 0.914112788087429, 0.44775583211984815]], [["Cutout", 0.223575616055454, 0.6328591417299063], ["TranslateY", 0.09269465212259387, 0.5101073959070608]], [["Rotate", 0.3315734525975911, 0.9983593458299167], ["Sharpness", 0.12245416662856974, 0.6258689139914664]], [["ShearY", 0.696116760180471, 0.6317805202283014], ["Color", 0.847501151593963, 0.4440116609830195]], [["Solarize", 0.24945891607225948, 0.7651150206105561], ["Cutout", 0.7229677092930331, 0.12674657348602494]], [["TranslateX", 0.43461945065713675, 0.06476571036747841], ["Color", 0.6139316940180952, 0.7376264330632316]], [["Invert", 0.1933003530637138, 0.4497819016184308], ["Invert", 0.18391634069983653, 0.3199769100951113]], [["Color", 0.20418296626476137, 0.36785101882029814], ["Posterize", 0.624658293920083, 0.8390081535735991]], [["Sharpness", 0.5864963540530814, 0.586672446690273], ["Posterize", 0.1980280647652339, 0.222114611452575]], [["Invert", 0.3543654961628104, 0.5146369635250309], ["Equalize", 0.40751271919434434, 0.4325310837291978]], [["ShearY", 0.22602859359451877, 0.13137880879778158], ["Posterize", 0.7475029061591305, 0.803900538461099]], [["Sharpness", 0.12426276165599924, 0.5965912716602046], ["Invert", 0.22603903038966913, 0.4346802001255868]], [["TranslateY", 0.010307035630661765, 0.16577665156754046], ["Posterize", 0.4114319141395257, 0.829872913683949]], [["TranslateY", 0.9353069865746215, 0.5327821671247214], ["Color", 0.16990443486261103, 0.38794866007484197]], [["Cutout", 0.1028174322829021, 0.3955952903458266], ["ShearY", 0.4311995281335693, 0.48024695395374734]], [["Posterize", 0.1800334334284686, 0.0548749478418862], ["Brightness", 0.7545808536793187, 0.7699080551646432]], [["Color", 0.48695305373084197, 0.6674269768464615], ["ShearY", 0.4306032279086781, 0.06057690550239343]], [["Brightness", 0.4919399683825053, 0.677338905806407], ["Brightness", 0.24112708387760828, 0.42761103121157656]], [["Posterize", 0.4434818644882532, 0.9489450593207714], ["Posterize", 0.40957675116385955, 0.015664946759584186]], [["Posterize", 0.41307949855153797, 0.6843276552020272], 
["Rotate", 0.8003545094091291, 0.7002300783416026]], [["Color", 0.7038570031770905, 0.4697612983649519], ["Sharpness", 0.9700016496081002, 0.25185103545948884]], [["AutoContrast", 0.714641656154856, 0.7962423001719023], ["Sharpness", 0.2410097684093468, 0.5919171048019731]], [["TranslateX", 0.8101567644494714, 0.7156447005337443], ["Solarize", 0.5634727831229329, 0.8875158446846]], [["Sharpness", 0.5335258857303261, 0.364743126378182], ["Color", 0.453280875871377, 0.5621962714743068]], [["Cutout", 0.7423678127672542, 0.7726370777867049], ["Invert", 0.2806161382641934, 0.6021111986900146]], [["TranslateY", 0.15190341320343761, 0.3860373175487939], ["Cutout", 0.9980805818665679, 0.05332384819400854]], [["Posterize", 0.36518675678786605, 0.2935819027397963], ["TranslateX", 0.26586180351840005, 0.303641300745208]], [["Brightness", 0.19994509744377761, 0.90813953707639], ["Equalize", 0.8447217761297836, 0.3449396603478335]], [["Sharpness", 0.9294773669936768, 0.999713346583839], ["Brightness", 0.1359744825665662, 0.1658489221872924]], [["TranslateX", 0.11456529257659381, 0.9063795878367734], ["Equalize", 0.017438134319894553, 0.15776887259743755]], [["ShearX", 0.9833726383270114, 0.5688194948373335], ["Equalize", 0.04975615490994345, 0.8078130016227757]], [["Brightness", 0.2654654830488695, 0.8989789725280538], ["TranslateX", 0.3681535065952329, 0.36433345713161036]], [["Rotate", 0.04956524209892327, 0.5371942433238247], ["ShearY", 0.0005527499145153714, 0.56082571605602]], [["Rotate", 0.7918337108932019, 0.5906896260060501], ["Posterize", 0.8223967034091191, 0.450216998388943]], [["Color", 0.43595106766978337, 0.5253013785221605], ["Sharpness", 0.9169421073531799, 0.8439997639348893]], [["TranslateY", 0.20052300197155504, 0.8202662448307549], ["Sharpness", 0.2875792108435686, 0.6997181624527842]], [["Color", 0.10568089980973616, 0.3349467065132249], ["Brightness", 0.13070947282207768, 0.5757725013960775]], [["AutoContrast", 0.3749999712869779, 0.6665578760607657], ["Brightness", 0.8101178402610292, 0.23271946112218125]], [["Color", 0.6473605933679651, 0.7903409763232029], ["ShearX", 0.588080941572581, 0.27223524148254086]], [["Cutout", 0.46293361616697304, 0.7107761001833921], ["AutoContrast", 0.3063766931658412, 0.8026114219854579]], [["Brightness", 0.7884854981520251, 0.5503669863113797], ["Brightness", 0.5832456158675261, 0.5840349298921661]], [["Solarize", 0.4157539625058916, 0.9161905834309929], ["Sharpness", 0.30628197221802017, 0.5386291658995193]], [["Sharpness", 0.03329610069672856, 0.17066672983670506], ["Invert", 0.9900547302690527, 0.6276238841220477]], [["Solarize", 0.551015648982762, 0.6937104775938737], ["Color", 0.8838491591064375, 0.31596634380795385]], [["AutoContrast", 0.16224182418148447, 0.6068227969351896], ["Sharpness", 0.9599468096118623, 0.4885289719905087]], [["TranslateY", 0.06576432526133724, 0.6899544605400214], ["Posterize", 0.2177096480169678, 0.9949164789616582]], [["Solarize", 0.529820544480292, 0.7576047224165541], ["Sharpness", 0.027047878909321643, 0.45425231553970685]], [["Sharpness", 0.9102526010473146, 0.8311987141993857], ["Invert", 0.5191838751826638, 0.6906136644742229]], [["Solarize", 0.4762773516008588, 0.7703654263842423], ["Color", 0.8048437792602289, 0.4741523094238038]], [["Sharpness", 0.7095055508594206, 0.7047344238075169], ["Sharpness", 0.5059623654132546, 0.6127255499234886]], [["TranslateY", 0.02150725921966186, 0.3515764519224378], ["Posterize", 0.12482170119714735, 0.7829851754051393]], [["Color", 0.7983830079184816, 0.6964694521670339], 
["Brightness", 0.3666527856286296, 0.16093151636495978]], [["AutoContrast", 0.6724982375829505, 0.536777706678488], ["Sharpness", 0.43091754837597646, 0.7363240924241439]], [["Brightness", 0.2889770401966227, 0.4556557902380539], ["Sharpness", 0.8805303296690755, 0.6262218017754902]], [["Sharpness", 0.5341939854581068, 0.6697109101429343], ["Rotate", 0.6806606655137529, 0.4896914517968317]], [["Sharpness", 0.5690509737059344, 0.32790632371915096], ["Posterize", 0.7951894258661069, 0.08377850335209162]], [["Color", 0.6124132978216081, 0.5756485920709012], ["Brightness", 0.33053544654445344, 0.23321841707002083]], [["TranslateX", 0.0654795026615917, 0.5227246924310244], ["ShearX", 0.2932320531132063, 0.6732066478183716]], [["Cutout", 0.6226071187083615, 0.01009274433736012], ["ShearX", 0.7176799968189801, 0.3758780240463811]], [["Rotate", 0.18172339508029314, 0.18099184896819184], ["ShearY", 0.7862658331645667, 0.295658135767252]], [["Contrast", 0.4156099177015862, 0.7015784500878446], ["Sharpness", 0.6454135310009, 0.32335858947955287]], [["Color", 0.6215885089922037, 0.6882673235388836], ["Brightness", 0.3539881732605379, 0.39486736455795496]], [["Invert", 0.8164816716866418, 0.7238192000817796], ["Sharpness", 0.3876355847343607, 0.9870077619731956]], [["Brightness", 0.1875628712629315, 0.5068115936257], ["Sharpness", 0.8732419122060423, 0.5028019258530066]], [["Sharpness", 0.6140734993408259, 0.6458239834366959], ["Rotate", 0.5250107862824867, 0.533419456933602]], [["Sharpness", 0.5710893143725344, 0.15551651073007305], ["ShearY", 0.6548487860151722, 0.021365083044319146]], [["Color", 0.7610250354649954, 0.9084452893074055], ["Brightness", 0.6934611792619156, 0.4108071412071374]], [["ShearY", 0.07512550098923898, 0.32923768385754293], ["ShearY", 0.2559588911696498, 0.7082337365398496]], [["Cutout", 0.5401319018926146, 0.004750568603408445], ["ShearX", 0.7473354415031975, 0.34472481968368773]], [["Rotate", 0.02284154583679092, 0.1353450082435801], ["ShearY", 0.8192458031684238, 0.2811653613473772]], [["Contrast", 0.21142896718139154, 0.7230739568811746], ["Sharpness", 0.6902690582665707, 0.13488436112901683]], [["Posterize", 0.21701219600958138, 0.5900695769640687], ["Rotate", 0.7541095031505971, 0.5341162375286219]], [["Posterize", 0.5772853064792737, 0.45808311743269936], ["Brightness", 0.14366050177823675, 0.4644871239446629]], [["Cutout", 0.8951718842805059, 0.4970074074310499], ["Equalize", 0.3863835903119882, 0.9986531042150006]], [["Equalize", 0.039411354473938925, 0.7475477254908457], ["Sharpness", 0.8741966378291861, 0.7304822679596362]], [["Solarize", 0.4908704265218634, 0.5160677350249471], ["Color", 0.24961813832742435, 0.09362352627360726]], [["Rotate", 7.870457075154214e-05, 0.8086950025500952], ["Solarize", 0.10200484521793163, 0.12312889222989265]], [["Contrast", 0.8052564975559727, 0.3403813036543645], ["Solarize", 0.7690158533600184, 0.8234626822018851]], [["AutoContrast", 0.680362728854513, 0.9415320040873628], ["TranslateY", 0.5305871824686941, 0.8030609611614028]], [["Cutout", 0.1748050257378294, 0.06565343731910589], ["TranslateX", 0.1812738872339903, 0.6254461448344308]], [["Brightness", 0.4230502644722749, 0.3346463682905031], ["ShearX", 0.19107198973659312, 0.6715789128604919]], [["ShearX", 0.1706528684548394, 0.7816570201200446], ["TranslateX", 0.494545185948171, 0.4710810058360291]], [["TranslateX", 0.42356251508933324, 0.23865307292867322], ["TranslateX", 0.24407503619326745, 0.6013778508137331]], [["AutoContrast", 0.7719512185744232, 0.3107905373009763], 
["ShearY", 0.49448082925617176, 0.5777951230577671]], [["Cutout", 0.13026983827940525, 0.30120438757485657], ["Brightness", 0.8857896834516185, 0.7731541459513939]], [["AutoContrast", 0.6422800349197934, 0.38637401090264556], ["TranslateX", 0.25085431400995084, 0.3170642592664873]], [["Sharpness", 0.22336654455367122, 0.4137774852324138], ["ShearY", 0.22446851054920894, 0.518341735882535]], [["Color", 0.2597579403253848, 0.7289643913060193], ["Sharpness", 0.5227416670468619, 0.9239943674030637]], [["Cutout", 0.6835337711563527, 0.24777620448593812], ["AutoContrast", 0.37260245353051846, 0.4840361183247263]], [["Posterize", 0.32756602788628375, 0.21185124493743707], ["ShearX", 0.25431504951763967, 0.19585996561416225]], [["AutoContrast", 0.07930627591849979, 0.5719381348340309], ["AutoContrast", 0.335512380071304, 0.4208050118308541]], [["Rotate", 0.2924360268257798, 0.5317629242879337], ["Sharpness", 0.4531050021499891, 0.4102650087199528]], [["Equalize", 0.5908862210984079, 0.468742362277498], ["Brightness", 0.08571766548550425, 0.5629320703375056]], [["Cutout", 0.52751122383816, 0.7287774744737556], ["Equalize", 0.28721628275296274, 0.8075179887475786]], [["AutoContrast", 0.24208377391366226, 0.34616549409607644], ["TranslateX", 0.17454707403766834, 0.5278055700078459]], [["Brightness", 0.5511881924749478, 0.999638675514418], ["Equalize", 0.14076197797220913, 0.2573030693317552]], [["ShearX", 0.668731433926434, 0.7564253049646743], ["Color", 0.63235486543845, 0.43954436063340785]], [["ShearX", 0.40511960873276237, 0.5710419512142979], ["Contrast", 0.9256769948746423, 0.7461350716211649]], [["Cutout", 0.9995917204023061, 0.22908419326246265], ["TranslateX", 0.5440902956629469, 0.9965570051216295]], [["Color", 0.22552987172228894, 0.4514558960849747], ["Sharpness", 0.638058150559443, 0.9987829481002615]], [["Contrast", 0.5362775837534763, 0.7052133185951871], ["ShearY", 0.220369845547023, 0.7593922994775721]], [["ShearX", 0.0317785822935219, 0.775536785253455], ["TranslateX", 0.7939510227015061, 0.5355620618496535]], [["Cutout", 0.46027969917602196, 0.31561199122527517], ["Color", 0.06154066467629451, 0.5384660000729091]], [["Sharpness", 0.7205483743301113, 0.552222392539886], ["Posterize", 0.5146496404711752, 0.9224333144307473]], [["ShearX", 0.00014547730356910538, 0.3553954298642108], ["TranslateY", 0.9625736029090676, 0.57403418640424]], [["Posterize", 0.9199917903297341, 0.6690259107633706], ["Posterize", 0.0932558110217602, 0.22279303372106138]], [["Invert", 0.25401453476874863, 0.3354329544078385], ["Posterize", 0.1832673201325652, 0.4304718799821412]], [["TranslateY", 0.02084122674367607, 0.12826181437197323], ["ShearY", 0.655862534043703, 0.3838330909470975]], [["Contrast", 0.35231797644104523, 0.3379356652070079], ["Cutout", 0.19685599014304822, 0.1254328595280942]], [["Sharpness", 0.18795594984191433, 0.09488678946484895], ["ShearX", 0.33332876790679306, 0.633523782574133]], [["Cutout", 0.28267175940290246, 0.7901991550267817], ["Contrast", 0.021200195312951198, 0.4733128702798515]], [["ShearX", 0.966231043411256, 0.7700673327786812], ["TranslateX", 0.7102390777763321, 0.12161245817120675]], [["Cutout", 0.5183324259533826, 0.30766086003013055], ["Color", 0.48399078150128927, 0.4967477809069189]], [["Sharpness", 0.8160855187385873, 0.47937658961644], ["Posterize", 0.46360395447862535, 0.7685454058155061]], [["ShearX", 0.10173571421694395, 0.3987290690178754], ["TranslateY", 0.8939980277379345, 0.5669994143735713]], [["Posterize", 0.6768089584801844, 0.7113149244621721], 
["Posterize", 0.054896856043358935, 0.3660837250743921]], [["AutoContrast", 0.5915576211896306, 0.33607718177676493], ["Contrast", 0.3809408206617828, 0.5712201773913784]], [["AutoContrast", 0.012321347472748323, 0.06379072432796573], ["Rotate", 0.0017964439160045656, 0.7598026295973337]], [["Contrast", 0.6007100085192627, 0.36171972473370206], ["Invert", 0.09553573684975913, 0.12218510774295901]], [["AutoContrast", 0.32848604643836266, 0.2619457656206414], ["Invert", 0.27082113532501784, 0.9967965642293485]], [["AutoContrast", 0.6156282120903395, 0.9422706516080884], ["Sharpness", 0.4215509247379262, 0.4063347716503587]], [["Solarize", 0.25059210436331264, 0.7215305521159305], ["Invert", 0.1654465185253614, 0.9605851884186778]], [["AutoContrast", 0.4464438610980994, 0.685334175815482], ["Cutout", 0.24358625461158645, 0.4699066834058694]], [["Rotate", 0.5931657741857909, 0.6813978655574067], ["AutoContrast", 0.9259100547738681, 0.4903201223870492]], [["Color", 0.8203976071280751, 0.9777824466585101], ["Posterize", 0.4620669369254169, 0.2738895968716055]], [["Contrast", 0.13754352055786848, 0.3369433962088463], ["Posterize", 0.48371187792441916, 0.025718004361451302]], [["Rotate", 0.5208233630704999, 0.1760188899913535], ["TranslateX", 0.49753461392937226, 0.4142935276250922]], [["Cutout", 0.5967418240931212, 0.8028675552639539], ["Cutout", 0.20021854152659121, 0.19426330549590076]], [["ShearY", 0.549583567386676, 0.6601326640171705], ["Cutout", 0.6111813470383047, 0.4141935587984994]], [["Brightness", 0.6354891977535064, 0.31591459747846745], ["AutoContrast", 0.7853952208711621, 0.6555861906702081]], [["AutoContrast", 0.7333725370546154, 0.9919410576081586], ["Cutout", 0.9984177877923588, 0.2938253683694291]], [["Color", 0.33219296307742263, 0.6378995578424113], ["AutoContrast", 0.15432820754183288, 0.7897899838932103]], [["Contrast", 0.5905289460222578, 0.8158577207653422], ["Cutout", 0.3980284381203051, 0.43030531250317217]], [["TranslateX", 0.452093693346745, 0.5251475931559115], ["Rotate", 0.991422504871258, 0.4556503729269001]], [["Color", 0.04560406292983776, 0.061574671308480766], ["Brightness", 0.05161079440128734, 0.6718398142425688]], [["Contrast", 0.02913302416506853, 0.14402056093217708], ["Rotate", 0.7306930378774588, 0.47088249057922094]], [["Solarize", 0.3283072384190169, 0.82680847744367], ["Invert", 0.21632614168418854, 0.8792241691482687]], [["Equalize", 0.4860808352478527, 0.9440534949023064], ["Cutout", 0.31395897639184694, 0.41805859306017523]], [["Rotate", 0.2816043232522335, 0.5451282807926706], ["Color", 0.7388520447173302, 0.7706503658143311]], [["Color", 0.9342776719536201, 0.9039981381514299], ["Rotate", 0.6646389177840164, 0.5147917008383647]], [["Cutout", 0.08929430082050335, 0.22416445996932374], ["Posterize", 0.454485751267457, 0.500958345348237]], [["TranslateX", 0.14674201106374488, 0.7018633472428202], ["Sharpness", 0.6128796723832848, 0.743535235614809]], [["TranslateX", 0.5189900164469432, 0.6491132403587601], ["Contrast", 0.26309555778227806, 0.5976857969656114]], [["Solarize", 0.23569808291972655, 0.3315781686591778], ["ShearY", 0.07292078937544964, 0.7460326987587573]], [["ShearY", 0.7090542757477153, 0.5246437008439621], ["Sharpness", 0.9666919148538443, 0.4841687888767071]], [["Solarize", 0.3486952615189488, 0.7012877201721799], ["Invert", 0.1933387967311534, 0.9535472742828175]], [["AutoContrast", 0.5393460721514914, 0.6924005011697713], ["Cutout", 0.16988156769247176, 0.3667207571712882]], [["Rotate", 0.5815329514554719, 0.5390406879316949], 
["AutoContrast", 0.7370538341589625, 0.7708822194197815]], [["Color", 0.8463701017918459, 0.9893491045831084], ["Invert", 0.06537367901579016, 0.5238468509941635]], [["Contrast", 0.8099771812443645, 0.39371603893945184], ["Posterize", 0.38273629875646487, 0.46493786058573966]], [["Color", 0.11164686537114032, 0.6771450570033168], ["Posterize", 0.27921361289661406, 0.7214300893597819]], [["Contrast", 0.5958265906571906, 0.5963959447666958], ["Sharpness", 0.2640889223630885, 0.3365870842641453]], [["Color", 0.255634146724125, 0.5610029792926452], ["ShearY", 0.7476893976084721, 0.36613194760395557]], [["ShearX", 0.2167581882130063, 0.022978065071245002], ["TranslateX", 0.1686864409720319, 0.4919575435512007]], [["Solarize", 0.10702753776284957, 0.3954707963684698], ["Contrast", 0.7256100635368403, 0.48845259655719686]], [["Sharpness", 0.6165615058519549, 0.2624079463213861], ["ShearX", 0.3804820351860919, 0.4738994677544202]], [["TranslateX", 0.18066394808448177, 0.8174509422318228], ["Solarize", 0.07964569396290502, 0.45495935736800974]], [["Sharpness", 0.2741884021129658, 0.9311045302358317], ["Cutout", 0.0009101326429323388, 0.5932102256756948]], [["Rotate", 0.8501796375826188, 0.5092564038282137], ["Brightness", 0.6520146983999912, 0.724091283316938]], [["Brightness", 0.10079744898900078, 0.7644088017429471], ["AutoContrast", 0.33540215138213575, 0.1487538541758792]], [["ShearY", 0.10632545944757177, 0.9565164562996977], ["Rotate", 0.275833816849538, 0.6200731548023757]], [["Color", 0.6749819274397422, 0.41042188598168844], ["AutoContrast", 0.22396590966461932, 0.5048018491863738]], [["Equalize", 0.5044277111650255, 0.2649182381110667], ["Brightness", 0.35715133289571355, 0.8653260893016869]], [["Cutout", 0.49083594426355326, 0.5602781291093129], ["Posterize", 0.721795488514384, 0.5525847430754974]], [["Sharpness", 0.5081835448947317, 0.7453323423804428], ["TranslateX", 0.11511932212234266, 0.4337766796030984]], [["Solarize", 0.3817050641766593, 0.6879004573473403], ["Invert", 0.0015041436267447528, 0.9793134066888262]], [["AutoContrast", 0.5107410439697935, 0.8276720355454423], ["Cutout", 0.2786270701864015, 0.43993387208414564]], [["Rotate", 0.6711202569428987, 0.6342930903972932], ["Posterize", 0.802820231163559, 0.42770002619222053]], [["Color", 0.9426854321337312, 0.9055431782458764], ["AutoContrast", 0.3556422423506799, 0.2773922428787449]], [["Contrast", 0.10318991257659992, 0.30841372533347416], ["Posterize", 0.4202264962677853, 0.05060395018085634]], [["Invert", 0.549305630337048, 0.886056156681853], ["Cutout", 0.9314157033373055, 0.3485836940307909]], [["ShearX", 0.5642891775895684, 0.16427372934801418], ["Invert", 0.228741164726475, 0.5066345406806475]], [["ShearY", 0.5813123201003086, 0.33474363490586106], ["Equalize", 0.11803439432255824, 0.8583936440614798]], [["Sharpness", 0.1642809706111211, 0.6958675237301609], ["ShearY", 0.5989560762277414, 0.6194018060415276]], [["Rotate", 0.05092104774529638, 0.9358045394527796], ["Cutout", 0.6443254331615441, 0.28548414658857657]], [["Brightness", 0.6986036769232594, 0.9618046340942727], ["Sharpness", 0.5564490243465492, 0.6295231286085622]], [["Brightness", 0.42725649792574105, 0.17628028916784244], ["Equalize", 0.4425109360966546, 0.6392872650036018]], [["ShearY", 0.5758622795525444, 0.8773349286588288], ["ShearX", 0.038525646435423666, 0.8755366512394268]], [["Sharpness", 0.3704459924265827, 0.9236361456197351], ["Color", 0.6379842432311235, 0.4548767717224531]], [["Contrast", 0.1619523824549347, 0.4506528800882731], 
["AutoContrast", 0.34513874426188385, 0.3580290330996726]], [["Contrast", 0.728699731513527, 0.6932238009822878], ["Brightness", 0.8602917375630352, 0.5341445123280423]], [["Equalize", 0.3574552353044203, 0.16814745124536548], ["Rotate", 0.24191717169379262, 0.3279497108179034]], [["ShearY", 0.8567478695576244, 0.37746117240238164], ["ShearX", 0.9654125389830487, 0.9283047610798827]], [["ShearY", 0.4339052480582405, 0.5394548246617406], ["Cutout", 0.5070570647967001, 0.7846286976687882]], [["AutoContrast", 0.021620100406875065, 0.44425839772845227], ["AutoContrast", 0.33978157614075183, 0.47716564815092244]], [["Contrast", 0.9727600659025666, 0.6651758819229426], ["Brightness", 0.9893133904996626, 0.39176397622636105]], [["Equalize", 0.283428620586305, 0.18727922861893637], ["Rotate", 0.3556063466797136, 0.3722839913107821]], [["ShearY", 0.7276172841941864, 0.4834188516302227], ["ShearX", 0.010783217950465884, 0.9756458772142235]], [["ShearY", 0.2901753295101581, 0.5684700238749064], ["Cutout", 0.655585564610337, 0.9490071307790201]], [["AutoContrast", 0.008507193981450278, 0.4881150103902877], ["AutoContrast", 0.6561989723231185, 0.3715071329838596]], [["Contrast", 0.7702505530948414, 0.6961371266519999], ["Brightness", 0.9953051630261895, 0.3861962467326121]], [["Equalize", 0.2805270012472756, 0.17715406116880994], ["Rotate", 0.3111256593947474, 0.15824352183820073]], [["Brightness", 0.9888680802094193, 0.4856236485253163], ["ShearX", 0.022370252047332284, 0.9284975906226682]], [["ShearY", 0.4065719044318099, 0.7468528006921563], ["AutoContrast", 0.19494427109708126, 0.8613186475174786]], [["AutoContrast", 0.023296727279367765, 0.9170949567425306], ["AutoContrast", 0.11663051100921168, 0.7908646792175343]], [["AutoContrast", 0.7335191671571732, 0.4958357308292425], ["Color", 0.7964964008349845, 0.4977687544324929]], [["ShearX", 0.19905221600021472, 0.3033081933150046], ["Equalize", 0.9383410219319321, 0.3224669877230161]], [["ShearX", 0.8265450331466404, 0.6509091423603757], ["Sharpness", 0.7134181178748723, 0.6472835976443643]], [["ShearY", 0.46962439525486044, 0.223433110541722], ["Rotate", 0.7749806946212373, 0.5337060376916906]], [["Posterize", 0.1652499695106796, 0.04860659068586126], ["Brightness", 0.6644577712782511, 0.4144528269429337]], [["TranslateY", 0.6220449565731829, 0.4917495676722932], ["Posterize", 0.6255000355409635, 0.8374266890984867]], [["AutoContrast", 0.4887160797052227, 0.7106426020530529], ["Sharpness", 0.7684218571497236, 0.43678474722954763]], [["Invert", 0.13178101535845366, 0.8301141976359813], ["Color", 0.002820877424219378, 0.49444413062487075]], [["TranslateX", 0.9920683666478188, 0.5862245842588877], ["Posterize", 0.5536357075855376, 0.5454300367281468]], [["Brightness", 0.8150181219663427, 0.1411060258870707], ["Sharpness", 0.8548823004164599, 0.77008691072314]], [["Brightness", 0.9580478020413399, 0.7198667636628974], ["ShearY", 0.8431585033377366, 0.38750016565010803]], [["Solarize", 0.2331505347152334, 0.25754361489084787], ["TranslateY", 0.447431373734262, 0.5782399531772253]], [["TranslateY", 0.8904927998691309, 0.25872872455072315], ["AutoContrast", 0.7129888139716263, 0.7161603231650524]], [["ShearY", 0.6336216800247362, 0.5247508616674911], ["Cutout", 0.9167315119726633, 0.2060557387978919]], [["ShearX", 0.001661782345968199, 0.3682225725445044], ["Solarize", 0.12303352043754572, 0.5014989548584458]], [["Brightness", 0.9723625105116246, 0.6555444729681099], ["Contrast", 0.5539208721135375, 0.7819973409318487]], [["Equalize", 0.3262607499912611, 
0.0006745572802121513], ["Contrast", 0.35341551623767103, 0.36814689398886347]], [["ShearY", 0.7478539900243613, 0.37322078030129185], ["TranslateX", 0.41558847793529247, 0.7394615158544118]], [["Invert", 0.13735541232529067, 0.5536403864332143], ["Cutout", 0.5109718190377135, 0.0447509485253679]], [["AutoContrast", 0.09403602327274725, 0.5909250807862687], ["ShearY", 0.53234060616395, 0.5316981359469398]], [["ShearX", 0.5651922367876323, 0.6794110241313183], ["Posterize", 0.7431624856363638, 0.7896861463783287]], [["Brightness", 0.30949179379286806, 0.7650569096019195], ["Sharpness", 0.5461629122105034, 0.6814369444005866]], [["Sharpness", 0.28459340191768434, 0.7802208350806028], ["Rotate", 0.15097973114238117, 0.5259683294104645]], [["ShearX", 0.6430803693700531, 0.9333735880102375], ["Contrast", 0.7522209520030653, 0.18831747966185058]], [["Contrast", 0.4219455937915647, 0.29949769435499646], ["Color", 0.6925322933509542, 0.8095523885795443]], [["ShearX", 0.23553236193043048, 0.17966207900468323], ["AutoContrast", 0.9039700567886262, 0.21983629944639108]], [["ShearX", 0.19256223146671514, 0.31200739880443584], ["Sharpness", 0.31962196883294713, 0.6828107668550425]], [["Cutout", 0.5947690279080912, 0.21728220253899178], ["Rotate", 0.6757188879871141, 0.489460599679474]], [["ShearY", 0.18365897125470526, 0.3988571115918058], ["Brightness", 0.7727489489504, 0.4790369956329955]], [["Contrast", 0.7090301084131432, 0.5178303607560537], ["ShearX", 0.16749258277688506, 0.33061773301592356]], [["ShearX", 0.3706690885419934, 0.38510677124319415], ["AutoContrast", 0.8288356276501032, 0.16556487668770264]], [["TranslateY", 0.16758043046445614, 0.30127092823893986], ["Brightness", 0.5194636577132354, 0.6225165310621702]], [["Cutout", 0.6087289363049726, 0.10439287037803044], ["Rotate", 0.7503452083033819, 0.7425316019981433]], [["ShearY", 0.24347189588329932, 0.5554979486672325], ["Brightness", 0.9468115239174161, 0.6132449358023568]], [["Brightness", 0.7144508395807994, 0.4610594769966929], ["ShearX", 0.16466683833092968, 0.3382903812375781]], [["Sharpness", 0.27743648684265465, 0.17200038071656915], ["Color", 0.47404262107546236, 0.7868991675614725]], [["Sharpness", 0.8603993513633618, 0.324604728411791], ["TranslateX", 0.3331597130403763, 0.9369586812977804]], [["Color", 0.1535813630595832, 0.4700116846558207], ["Color", 0.5435647971896318, 0.7639291483525243]], [["Brightness", 0.21486188101947656, 0.039347277341450576], ["Cutout", 0.7069526940684954, 0.39273934115015696]], [["ShearY", 0.7267130888840517, 0.6310800726389485], ["AutoContrast", 0.662163190824139, 0.31948540372237766]], [["ShearX", 0.5123132117185981, 0.1981015909438834], ["AutoContrast", 0.9009347363863067, 0.26790399126924036]], [["Brightness", 0.24245061453231648, 0.2673478678291436], ["ShearX", 0.31707976089283946, 0.6800582845544948]], [["Cutout", 0.9257780138367764, 0.03972673526848819], ["Rotate", 0.6807858944518548, 0.46974332280612097]], [["ShearY", 0.1543443071262312, 0.6051682587030671], ["Brightness", 0.9758203119828304, 0.4941406868162414]], [["Contrast", 0.07578049236491124, 0.38953819133407647], ["ShearX", 0.20194918288164293, 0.4141510791947318]], [["Color", 0.27826402243792286, 0.43517491081531157], ["AutoContrast", 0.6159269026143263, 0.2021846783488046]], [["AutoContrast", 0.5039377966534692, 0.19241507605941105], ["Invert", 0.5563931144385394, 0.7069728937319112]], [["Sharpness", 0.19031632433810566, 0.26310171056096743], ["Color", 0.4724537593175573, 0.6715201448387876]], [["ShearY", 0.2280910467786642, 
0.33340559088059313], ["ShearY", 0.8858560034869303, 0.2598627441471076]], [["ShearY", 0.07291814128021593, 0.5819462692986321], ["Cutout", 0.27605696060512147, 0.9693427371868695]], [["Posterize", 0.4249871586563321, 0.8256952014328607], ["Posterize", 0.005907466926447169, 0.8081353382152597]], [["Brightness", 0.9071305290601128, 0.4781196213717954], ["Posterize", 0.8996214311439275, 0.5540717376630279]], [["Brightness", 0.06560728936236392, 0.9920627849065685], ["TranslateX", 0.04530789794044952, 0.5318568944702607]], [["TranslateX", 0.6800263601084814, 0.4611536772507228], ["Rotate", 0.7245888375283157, 0.0914772551375381]], [["Sharpness", 0.879556061897963, 0.42272481462067535], ["TranslateX", 0.4600350422524085, 0.5742175429334919]], [["AutoContrast", 0.5005776243176145, 0.22597121331684505], ["Invert", 0.10763286370369299, 0.6841782704962373]], [["Sharpness", 0.7422908472000116, 0.6850324203882405], ["TranslateX", 0.3832914614128403, 0.34798646673324896]], [["ShearY", 0.31939465302679326, 0.8792088167639516], ["Brightness", 0.4093604352811235, 0.21055483197261338]], [["AutoContrast", 0.7447595860998638, 0.19280222555998586], ["TranslateY", 0.317754779431227, 0.9983454520593591]], [["Equalize", 0.27706973689750847, 0.6447455020660622], ["Contrast", 0.5626579126863761, 0.7920049962776781]], [["Rotate", 0.13064369451773816, 0.1495367590684905], ["Sharpness", 0.24893941981801215, 0.6295943894521504]], [["ShearX", 0.6856269993063254, 0.5167938584189854], ["Sharpness", 0.24835352574609537, 0.9990550493102627]], [["AutoContrast", 0.461654115871693, 0.43097388896245004], ["Cutout", 0.366359682416437, 0.08011826474215511]], [["AutoContrast", 0.993892672935951, 0.2403608711236933], ["ShearX", 0.6620817870694181, 0.1744814077869482]], [["ShearY", 0.6396747719986443, 0.15031017143644265], ["Brightness", 0.9451954879495629, 0.26490678840264714]], [["Color", 0.19311480787397262, 0.15712300697448575], ["Posterize", 0.05391448762015258, 0.6943963643155474]], [["Sharpness", 0.6199669674684085, 0.5412492335319072], ["Invert", 0.14086213450149815, 0.2611850277919339]], [["Posterize", 0.5533129268803405, 0.5332478159319912], ["ShearX", 0.48956244029096635, 0.09223930853562916]], [["ShearY", 0.05871590849449765, 0.19549715278943228], ["TranslateY", 0.7208521362741379, 0.36414003004659434]], [["ShearY", 0.7316263417917531, 0.0629747985768501], ["Contrast", 0.036359793501448245, 0.48658745414898386]], [["Rotate", 0.3301497610942963, 0.5686622043085637], ["ShearX", 0.40581487555676843, 0.5866127743850192]], [["ShearX", 0.6679039628249283, 0.5292270693200821], ["Sharpness", 0.25901391739310703, 0.9778360586541461]], [["AutoContrast", 0.27373222012596854, 0.14456771405730712], ["Contrast", 0.3877220783523938, 0.7965158941894336]], [["Solarize", 0.29440905483979096, 0.06071633809388455], ["Equalize", 0.5246736285116214, 0.37575084834661976]], [["TranslateY", 0.2191269464520395, 0.7444942293988484], ["Posterize", 0.3840878524812771, 0.31812671711741247]], [["Solarize", 0.25159267140731356, 0.5833264622559661], ["Brightness", 0.07552262572348738, 0.33210648549288435]], [["AutoContrast", 0.9770099298399954, 0.46421915310428197], ["AutoContrast", 0.04707358934642503, 0.24922048012183493]], [["Cutout", 0.5379685806621965, 0.02038212605928355], ["Brightness", 0.5900728303717965, 0.28807872931416956]], [["Sharpness", 0.11596624872886108, 0.6086947716949325], ["AutoContrast", 0.34876470059667525, 0.22707897759730578]], [["Contrast", 0.276545513135698, 0.8822580384226156], ["Rotate", 0.04874027684061846, 
0.6722214281612163]], [["ShearY", 0.595839851757025, 0.4389866852785822], ["Equalize", 0.5225492356128832, 0.2735290854063459]], [["Sharpness", 0.9918029636732927, 0.9919926583216121], ["Sharpness", 0.03672376137997366, 0.5563865980047012]], [["AutoContrast", 0.34169589759999847, 0.16419911552645738], ["Invert", 0.32995953043129234, 0.15073174739720568]], [["Posterize", 0.04600255098477292, 0.2632612790075844], ["TranslateY", 0.7852153329831825, 0.6990722310191976]], [["AutoContrast", 0.4414653815356372, 0.2657468780017082], ["Posterize", 0.30647061536763337, 0.3688222724948656]], [["Contrast", 0.4239361091421837, 0.6076562806342001], ["Cutout", 0.5780707784165284, 0.05361325256745192]], [["Sharpness", 0.7657895907855394, 0.9842407321667671], ["Sharpness", 0.5416352696151596, 0.6773681575200902]], [["AutoContrast", 0.13967381098331305, 0.10787258006315015], ["Posterize", 0.5019536507897069, 0.9881978222469807]], [["Brightness", 0.030528346448984903, 0.31562058762552847], ["TranslateY", 0.0843808140595676, 0.21019213305350526]], [["AutoContrast", 0.6934579165006736, 0.2530484168209199], ["Rotate", 0.0005751408130693636, 0.43790043943210005]], [["TranslateX", 0.611258547664328, 0.25465240215894935], ["Sharpness", 0.5001446909868196, 0.36102204109889413]], [["Contrast", 0.8995127327150193, 0.5493190695343996], ["Brightness", 0.242708780669213, 0.5461116653329015]], [["AutoContrast", 0.3751825351022747, 0.16845985803896962], ["Cutout", 0.25201103287363663, 0.0005893331783358435]], [["ShearX", 0.1518985779435941, 0.14768180777304504], ["Color", 0.85133530274324, 0.4006641163378305]], [["TranslateX", 0.5489668255504668, 0.4694591826554948], ["Rotate", 0.1917354490155893, 0.39993269385802177]], [["ShearY", 0.6689267479532809, 0.34304285013663577], ["Equalize", 0.24133154048883143, 0.279324043138247]], [["Contrast", 0.3412544002099494, 0.20217358823930232], ["Color", 0.8606984790510235, 0.14305503544676373]], [["Cutout", 0.21656155695311988, 0.5240101349572595], ["Brightness", 0.14109877717636352, 0.2016827341210295]], [["Sharpness", 0.24764371218833872, 0.19655480259925423], ["Posterize", 0.19460398862039913, 0.4975414350200679]], [["Brightness", 0.6071850094982323, 0.7270716448607151], ["Solarize", 0.111786402398499, 0.6325641684614275]], [["Contrast", 0.44772949532200856, 0.44267502710695955], ["AutoContrast", 0.360117506402693, 0.2623958228760273]], [["Sharpness", 0.8888131688583053, 0.936897400764746], ["Sharpness", 0.16080674198274894, 0.5681119841445879]], [["AutoContrast", 0.8004456226590612, 0.1788600469525269], ["Brightness", 0.24832285390647374, 0.02755350284841604]], [["ShearY", 0.06910320102646594, 0.26076407321544054], ["Contrast", 0.8633703022354964, 0.38968514704043056]], [["AutoContrast", 0.42306251382780613, 0.6883260271268138], ["Rotate", 0.3938724346852023, 0.16740881249086037]], [["Contrast", 0.2725343884286728, 0.6468194318074759], ["Sharpness", 0.32238942646494745, 0.6721149242783824]], [["AutoContrast", 0.942093919956842, 0.14675331481712853], ["Posterize", 0.5406276708262192, 0.683901182218153]], [["Cutout", 0.5386811894643584, 0.04498833938429728], ["Posterize", 0.17007257321724775, 0.45761177118620633]], [["Contrast", 0.13599408935104654, 0.53282738083886], ["Solarize", 0.26941667995081114, 0.20958261079465895]], [["Color", 0.6600788518606634, 0.9522228302165842], ["Invert", 0.0542722262516899, 0.5152431169321683]], [["Contrast", 0.5328934819727553, 0.2376220512388278], ["Posterize", 0.04890422575781711, 0.3182233123739474]], [["AutoContrast", 0.9289628064340965, 
0.2976678437448435], ["Color", 0.20936893798507963, 0.9649612821434217]], [["Cutout", 0.9019423698575457, 0.24002036989728096], ["Brightness", 0.48734445615892974, 0.047660899809176316]], [["Sharpness", 0.09347824275711591, 0.01358686275590612], ["Posterize", 0.9248539660538934, 0.4064232632650468]], [["Brightness", 0.46575675383704634, 0.6280194775484345], ["Invert", 0.17276207634499413, 0.21263495428839635]], [["Brightness", 0.7238014711679732, 0.6178946027258592], ["Equalize", 0.3815496086340364, 0.07301281068847276]], [["Contrast", 0.754557393588416, 0.895332753570098], ["Color", 0.32709957750707447, 0.8425486003491515]], [["Rotate", 0.43406698081696576, 0.28628263254953723], ["TranslateY", 0.43949548709125374, 0.15927082198238685]], [["Brightness", 0.0015838339831640708, 0.09341692553352654], ["AutoContrast", 0.9113966907329718, 0.8345900469751112]], [["ShearY", 0.46698796308585017, 0.6150701348176804], ["Invert", 0.14894062704815722, 0.2778388046184728]], [["Color", 0.30360499169455957, 0.995713092016834], ["Contrast", 0.2597016288524961, 0.8654420870658932]], [["Brightness", 0.9661642031891435, 0.7322006407169436], ["TranslateY", 0.4393502786333408, 0.33934762664274265]], [["Color", 0.9323638351992302, 0.912776309755293], ["Brightness", 0.1618274755371618, 0.23485741708056307]], [["Color", 0.2216470771158821, 0.3359240197334976], ["Sharpness", 0.6328691811471494, 0.6298393874452548]], [["Solarize", 0.4772769142265505, 0.7073470698713035], ["ShearY", 0.2656114148206966, 0.31343097010487253]], [["Solarize", 0.3839017339304234, 0.5985505779429036], ["Equalize", 0.002412059429196589, 0.06637506181196245]], [["Contrast", 0.12751196553017863, 0.46980311434237976], ["Sharpness", 0.3467487455865491, 0.4054907610444406]], [["AutoContrast", 0.9321813669127206, 0.31328471589533274], ["Rotate", 0.05801738717432747, 0.36035756254444273]], [["TranslateX", 0.52092390458353, 0.5261722561643886], ["Contrast", 0.17836804476171306, 0.39354333443158535]], [["Posterize", 0.5458100909925713, 0.49447244994482603], ["Brightness", 0.7372536822363605, 0.5303409097463796]], [["Solarize", 0.1913974941725724, 0.5582966653986761], ["Equalize", 0.020733669175727026, 0.9377467166472878]], [["Equalize", 0.16265732137763889, 0.5206282340874929], ["Sharpness", 0.2421533133595281, 0.506389065871883]], [["AutoContrast", 0.9787324801448523, 0.24815051941486466], ["Rotate", 0.2423487151245957, 0.6456493129745148]], [["TranslateX", 0.6809867726670327, 0.6949687002397612], ["Contrast", 0.16125673359747458, 0.7582679978218987]], [["Posterize", 0.8212000950994955, 0.5225012157831872], ["Brightness", 0.8824891856626245, 0.4499216779709508]], [["Solarize", 0.12061313332505218, 0.5319371283368052], ["Equalize", 0.04120865969945108, 0.8179402157299602]], [["Rotate", 0.11278256686005855, 0.4022686554165438], ["ShearX", 0.2983451019112792, 0.42782525461812604]], [["ShearY", 0.8847385513289983, 0.5429227024179573], ["Rotate", 0.21316428726607445, 0.6712120087528564]], [["TranslateX", 0.46448081241068717, 0.4746090648963252], ["Brightness", 0.19973580961271142, 0.49252862676553605]], [["Posterize", 0.49664100539481526, 0.4460713166484651], ["Brightness", 0.6629559985581529, 0.35192346529003693]], [["Color", 0.22710733249173676, 0.37943185764616194], ["ShearX", 0.015809774971472595, 0.8472080190835669]], [["Contrast", 0.4187366322381491, 0.21621979869256666], ["AutoContrast", 0.7631045030367304, 0.44965231251615134]], [["Sharpness", 0.47240637876720515, 0.8080091811749525], ["Cutout", 0.2853425420104144, 0.6669811510150936]], 
[["Posterize", 0.7830320527127324, 0.2727062685529881], ["Solarize", 0.527834000867504, 0.20098218845222998]], [["Contrast", 0.366380535288225, 0.39766001659663075], ["Cutout", 0.8708808878088891, 0.20669525734273086]], [["ShearX", 0.6815427281122932, 0.6146858582671569], ["AutoContrast", 0.28330622372053493, 0.931352024154997]], [["AutoContrast", 0.8668174463154519, 0.39961453880632863], ["AutoContrast", 0.5718557712359253, 0.6337062930797239]], [["ShearY", 0.8923152519411871, 0.02480062504737446], ["Cutout", 0.14954159341231515, 0.1422219808492364]], [["Rotate", 0.3733718175355636, 0.3861928572224287], ["Sharpness", 0.5651126520194574, 0.6091103847442831]], [["Posterize", 0.8891714191922857, 0.29600154265251016], ["TranslateY", 0.7865351723963945, 0.5664998548985523]], [["TranslateX", 0.9298214806998273, 0.729856565052017], ["AutoContrast", 0.26349082482341846, 0.9638882609038888]], [["Sharpness", 0.8387378377527128, 0.42146721129032494], ["AutoContrast", 0.9860522000876452, 0.4200699464169384]], [["ShearY", 0.019609159303115145, 0.37197835936879514], ["Cutout", 0.22199340461754258, 0.015932573201085848]], [["Rotate", 0.43871085583928443, 0.3283504258860078], ["Sharpness", 0.6077702068037776, 0.6830305349618742]], [["Contrast", 0.6160211756538094, 0.32029451083389626], ["Cutout", 0.8037631428427006, 0.4025688837399259]], [["TranslateY", 0.051637820936985435, 0.6908417834391846], ["Sharpness", 0.7602756948473368, 0.4927111506643095]], [["Rotate", 0.4973618638052235, 0.45931479729281227], ["TranslateY", 0.04701789716427618, 0.9408779705948676]], [["Rotate", 0.5214194592768602, 0.8371249272013652], ["Solarize", 0.17734812472813338, 0.045020798970228315]], [["ShearX", 0.7457999920079351, 0.19025612553075893], ["Sharpness", 0.5994846101703786, 0.5665094068864229]], [["Contrast", 0.6172655452900769, 0.7811432139704904], ["Cutout", 0.09915620454670282, 0.3963692287596121]], [["TranslateX", 0.2650112299235817, 0.7377261946165307], ["AutoContrast", 0.5019539734059677, 0.26905046992024506]], [["Contrast", 0.6646299821370135, 0.41667784809592945], ["Cutout", 0.9698457154992128, 0.15429001887703997]], [["Sharpness", 0.9467079029475773, 0.44906457469098204], ["Cutout", 0.30036908747917396, 0.4766149689663106]], [["Equalize", 0.6667517691051055, 0.5014839828447363], ["Solarize", 0.4127890336820831, 0.9578274770236529]], [["Cutout", 0.6447384874120834, 0.2868806107728985], ["Cutout", 0.4800990488106021, 0.4757538246206956]], [["Solarize", 0.12560195032363236, 0.5557473475801568], ["Equalize", 0.019957161871490228, 0.5556797187823773]], [["Contrast", 0.12607637375759484, 0.4300633627435161], ["Sharpness", 0.3437273670109087, 0.40493203127714417]], [["AutoContrast", 0.884353334807183, 0.5880138314357569], ["Rotate", 0.9846032404597116, 0.3591877296622974]], [["TranslateX", 0.6862295865975581, 0.5307482119690076], ["Contrast", 0.19439251187251982, 0.3999195825722808]], [["Posterize", 0.4187641835025246, 0.5008988942651585], ["Brightness", 0.6665805605402482, 0.3853288204214253]], [["Posterize", 0.4507470690013903, 0.4232437206624681], ["TranslateX", 0.6054107416317659, 0.38123828040922203]], [["AutoContrast", 0.29562338573283276, 0.35608605102687474], ["TranslateX", 0.909954785390274, 0.20098894888066549]], [["Contrast", 0.6015278411777212, 0.6049140992035096], ["Cutout", 0.47178713636517855, 0.5333747244651914]], [["TranslateX", 0.490851976691112, 0.3829593925141144], ["Sharpness", 0.2716675173824095, 0.5131696240367152]], [["Posterize", 0.4190558294646337, 0.39316689077269873], ["Rotate", 
0.5018526072725914, 0.295712490156129]], [["AutoContrast", 0.29624715560691617, 0.10937329832409388], ["Posterize", 0.8770505275992637, 0.43117765012206943]], [["Rotate", 0.6649970092751698, 0.47767131373391974], ["ShearX", 0.6257923540490786, 0.6643337040198358]], [["Sharpness", 0.5553620705849509, 0.8467799429696928], ["Cutout", 0.9006185811918932, 0.3537270716262]], [["ShearY", 0.0007619678283789788, 0.9494591850536303], ["Invert", 0.24267733654007673, 0.7851608409575828]], [["Contrast", 0.9730916198112872, 0.404670123321921], ["Sharpness", 0.5923587793251186, 0.7405792404430281]], [["Cutout", 0.07393909593373034, 0.44569630026328344], ["TranslateX", 0.2460593252211425, 0.4817527814541055]], [["Brightness", 0.31058654119340867, 0.7043749950260936], ["ShearX", 0.7632161538947713, 0.8043681264908555]], [["AutoContrast", 0.4352334371415373, 0.6377550087204297], ["Rotate", 0.2892714673415678, 0.49521052050510556]], [["Equalize", 0.509071051375276, 0.7352913414974414], ["ShearX", 0.5099959429711828, 0.7071566714593619]], [["Posterize", 0.9540506532512889, 0.8498853304461906], ["ShearY", 0.28199061357155397, 0.3161715627214629]], [["Posterize", 0.6740855359097433, 0.684004694936616], ["Posterize", 0.6816720350737863, 0.9654766942980918]], [["Solarize", 0.7149344531717328, 0.42212789795181643], ["Brightness", 0.686601460864528, 0.4263050070610551]], [["Cutout", 0.49577164991501, 0.08394890892056037], ["Rotate", 0.5810369852730606, 0.3320732965776973]], [["TranslateY", 0.1793755480490623, 0.6006520265468684], ["Brightness", 0.3769016576438939, 0.7190746300828186]], [["TranslateX", 0.7226363597757153, 0.3847027238123509], ["Brightness", 0.7641713191794035, 0.36234003077512544]], [["TranslateY", 0.1211227055347106, 0.6693523474608023], ["Brightness", 0.13011180247738063, 0.5126647617294864]], [["Equalize", 0.1501070550869129, 0.0038548909451806557], ["Posterize", 0.8266535939653881, 0.5502199643499207]], [["Sharpness", 0.550624117428359, 0.2023044586648523], ["Brightness", 0.06291556314780017, 0.7832635398703937]], [["Color", 0.3701578205508141, 0.9051537973590863], ["Contrast", 0.5763972727739397, 0.4905511239739898]], [["Rotate", 0.7678527224046323, 0.6723066265307555], ["Solarize", 0.31458533097383207, 0.38329324335154524]], [["Brightness", 0.292050127929522, 0.7047582807953063], ["ShearX", 0.040541891910333805, 0.06639328601282746]], [["TranslateY", 0.4293891393238555, 0.6608516902234284], ["Sharpness", 0.7794685477624004, 0.5168044063408147]], [["Color", 0.3682450402286552, 0.17274523597220048], ["ShearY", 0.3936056470397763, 0.5702597289866161]], [["Equalize", 0.43436990310624657, 0.9207072627823626], ["Contrast", 0.7608688260846083, 0.4759023148841439]], [["Brightness", 0.7926088966143935, 0.8270093925674497], ["ShearY", 0.4924174064969461, 0.47424347505831244]], [["Contrast", 0.043917555279430476, 0.15861903591675125], ["ShearX", 0.30439480405505853, 0.1682659341098064]], [["TranslateY", 0.5598255583454538, 0.721352536005039], ["Posterize", 0.9700921973303752, 0.6882015184440126]], [["AutoContrast", 0.3620887415037668, 0.5958176322317132], ["TranslateX", 0.14213781552733287, 0.6230799786459947]], [["Color", 0.490366889723972, 0.9863152892045195], ["Color", 0.817792262022319, 0.6755656429452775]], [["Brightness", 0.7030707021937771, 0.254633187122679], ["Color", 0.13977318232688843, 0.16378180123959793]], [["AutoContrast", 0.2933247831326118, 0.6283663376211102], ["Sharpness", 0.85430478154147, 0.9753613184208796]], [["Rotate", 0.6674299955457268, 0.48571208708018976], ["Contrast", 
0.47491370175907016, 0.6401079552479657]], [["Sharpness", 0.37589579644127863, 0.8475131989077025], ["TranslateY", 0.9985149867598191, 0.057815729375099975]], [["Equalize", 0.0017194373841596389, 0.7888361311461602], ["Contrast", 0.6779293670669408, 0.796851411454113]], [["TranslateY", 0.3296782119072306, 0.39765117357271834], ["Sharpness", 0.5890554357001884, 0.6318339473765834]], [["Posterize", 0.25423810893163856, 0.5400430289894207], ["Sharpness", 0.9273643918988342, 0.6480913470982622]], [["Cutout", 0.850219975768305, 0.4169812455601289], ["Solarize", 0.5418755745870089, 0.5679666650495466]], [["Brightness", 0.008881361977310959, 0.9282562314720516], ["TranslateY", 0.7736066471553994, 0.20041167606029642]], [["Brightness", 0.05382537581401925, 0.6405265501035952], ["Contrast", 0.30484329473639593, 0.5449338155734242]], [["Color", 0.613257119787967, 0.4541503912724138], ["Brightness", 0.9061572524724674, 0.4030159294447347]], [["Brightness", 0.02739111568942537, 0.006028056532326534], ["ShearX", 0.17276751958646486, 0.05967365780621859]], [["TranslateY", 0.4376298213047888, 0.7691816164456199], ["Sharpness", 0.8162292718857824, 0.6054926462265117]], [["Color", 0.37963069679121214, 0.5946919433483344], ["Posterize", 0.08485417284005387, 0.5663580913231766]], [["Equalize", 0.49785780226818316, 0.9999137109183761], ["Sharpness", 0.7685879484682496, 0.6260846154212211]], [["AutoContrast", 0.4190931409670763, 0.2374852525139795], ["Posterize", 0.8797422264608563, 0.3184738541692057]], [["Rotate", 0.7307269024632872, 0.41523609600701106], ["ShearX", 0.6166685870692289, 0.647133807748274]], [["Sharpness", 0.5633713231039904, 0.8276694754755876], ["Cutout", 0.8329340776895764, 0.42656043027424073]], [["ShearY", 0.14934828370884312, 0.8622510773680372], ["Invert", 0.25925989086863277, 0.8813283584888576]], [["Contrast", 0.9457071292265932, 0.43228655518614034], ["Sharpness", 0.8485316947644338, 0.7590298998732413]], [["AutoContrast", 0.8386103589399184, 0.5859583131318076], ["Solarize", 0.466758711343543, 0.9956215363818983]], [["Rotate", 0.9387133710926467, 0.19180564509396503], ["Rotate", 0.5558247609706255, 0.04321698692007105]], [["ShearX", 0.3608716600695567, 0.15206159451532864], ["TranslateX", 0.47295292905710146, 0.5290760596129888]], [["TranslateX", 0.8357685981547495, 0.5991305115727084], ["Posterize", 0.5362929404188211, 0.34398525441943373]], [["ShearY", 0.6751984031632811, 0.6066293622133011], ["Contrast", 0.4122723990263818, 0.4062467515095566]], [["Color", 0.7515349936021702, 0.5122124665429213], ["Contrast", 0.03190514292904123, 0.22903520154660545]], [["Contrast", 0.5448962625054385, 0.38655673938910545], ["AutoContrast", 0.4867400684894492, 0.3433111101096984]], [["Rotate", 0.0008372434310827959, 0.28599951781141714], ["Equalize", 0.37113686925530087, 0.5243929348114981]], [["Color", 0.720054993488857, 0.2010177651701808], ["TranslateX", 0.23036196506059398, 0.11152764304368781]], [["Cutout", 0.859134208332423, 0.6727345740185254], ["ShearY", 0.02159833505865088, 0.46390076266538544]], [["Sharpness", 0.3428232157391428, 0.4067874527486514], ["Brightness", 0.5409415136577347, 0.3698432231874003]], [["Solarize", 0.27303978936454776, 0.9832186173589548], ["ShearY", 0.08831127213044043, 0.4681870331149774]], [["TranslateY", 0.2909309268736869, 0.4059460811623174], ["Sharpness", 0.6425125139803729, 0.20275737203293587]], [["Contrast", 0.32167626214661627, 0.28636162794046977], ["Invert", 0.4712405253509603, 0.7934644799163176]], [["Color", 0.867993060896951, 0.96574321666213], 
["Color", 0.02233897320328512, 0.44478933557303063]], [["AutoContrast", 0.1841254751814967, 0.2779992148017741], ["Color", 0.3586283093530607, 0.3696246850445087]], [["Posterize", 0.2052935984046965, 0.16796913860308244], ["ShearX", 0.4807226832843722, 0.11296747254563266]], [["Cutout", 0.2016411266364791, 0.2765295444084803], ["Brightness", 0.3054112810424313, 0.695924264931216]], [["Rotate", 0.8405872184910479, 0.5434142541450815], ["Cutout", 0.4493615138203356, 0.893453735250007]], [["Contrast", 0.8433310507685494, 0.4915423577963278], ["ShearX", 0.22567799557913246, 0.20129892537008834]], [["Contrast", 0.045954277103674224, 0.5043900167190442], ["Cutout", 0.5552992473054611, 0.14436447810888237]], [["AutoContrast", 0.7719296115130478, 0.4440417544621306], ["Sharpness", 0.13992809206158283, 0.7988278670709781]], [["Color", 0.7838574233513952, 0.5971351401625151], ["TranslateY", 0.13562290583925385, 0.2253039635819158]], [["Cutout", 0.24870301109385806, 0.6937886690381568], ["TranslateY", 0.4033400068952813, 0.06253378991880915]], [["TranslateX", 0.0036059390486775644, 0.5234723884081843], ["Solarize", 0.42724862530733526, 0.8697702564187633]], [["Equalize", 0.5446026737834311, 0.9367992979112202], ["ShearY", 0.5943478903735789, 0.42345889214100046]], [["ShearX", 0.18611885697957506, 0.7320849092947314], ["ShearX", 0.3796416430900566, 0.03817761920009881]], [["Posterize", 0.37636778506979124, 0.26807924785236537], ["Brightness", 0.4317372554383255, 0.5473346211870932]], [["Brightness", 0.8100436240916665, 0.3817612088285007], ["Brightness", 0.4193974619003253, 0.9685902764026623]], [["Contrast", 0.701776402197012, 0.6612786008858009], ["Color", 0.19882787177960912, 0.17275597188875483]], [["Color", 0.9538303302832989, 0.48362384535228686], ["ShearY", 0.2179980837345602, 0.37027290936457313]], [["TranslateY", 0.6068028691503798, 0.3919346523454841], ["Cutout", 0.8228303342563138, 0.18372280287814613]], [["Equalize", 0.016416758802906828, 0.642838949194916], ["Cutout", 0.5761717838655257, 0.7600661153497648]], [["Color", 0.9417761826818639, 0.9916074035986558], ["Equalize", 0.2524209308597042, 0.6373703468715077]], [["Brightness", 0.75512589439513, 0.6155072321007569], ["Contrast", 0.32413476940254515, 0.4194739830159837]], [["Sharpness", 0.3339450765586968, 0.9973297539194967], ["AutoContrast", 0.6523930242124429, 0.1053482471037186]], [["ShearX", 0.2961391955838801, 0.9870036064904368], ["ShearY", 0.18705025965909403, 0.4550895821154484]], [["TranslateY", 0.36956447983807883, 0.36371471767143543], ["Sharpness", 0.6860051967688487, 0.2850190720087796]], [["Cutout", 0.13017742151902967, 0.47316674150067195], ["Invert", 0.28923829959551883, 0.9295585654924601]], [["Contrast", 0.7302368472279086, 0.7178974949876642], ["TranslateY", 0.12589674152030433, 0.7485392909494947]], [["Color", 0.6474693117772619, 0.5518269515590674], ["Contrast", 0.24643004970708016, 0.3435581358079418]], [["Contrast", 0.5650327855750835, 0.4843031798040887], ["Brightness", 0.3526684005761239, 0.3005305004600969]], [["Rotate", 0.09822284968122225, 0.13172798244520356], ["Equalize", 0.38135066977857157, 0.5135129123554154]], [["Contrast", 0.5902590645585712, 0.2196062383730596], ["ShearY", 0.14188379126120954, 0.1582612142182743]], [["Cutout", 0.8529913814417812, 0.89734031211874], ["Color", 0.07293767043078672, 0.32577659205278897]], [["Equalize", 0.21401668971453247, 0.040015259500028266], ["ShearY", 0.5126400895338797, 0.4726484828276388]], [["Brightness", 0.8269430025954498, 0.9678362841865166], ["ShearY", 
0.17142069814830432, 0.4726727848289514]], [["Brightness", 0.699707089334018, 0.2795501395789335], ["ShearX", 0.5308818178242845, 0.10581814221896294]], [["Equalize", 0.32519644258946145, 0.15763390340309183], ["TranslateX", 0.6149090364414208, 0.7454832565718259]], [["AutoContrast", 0.5404508567155423, 0.7472387762067986], ["Equalize", 0.05649876539221024, 0.5628180219887216]]] + return p diff --git a/autoPyTorch/components/preprocessing/image_preprocessing/augmentation_transforms.py b/autoPyTorch/components/preprocessing/image_preprocessing/augmentation_transforms.py new file mode 100644 index 000000000..fb716c905 --- /dev/null +++ b/autoPyTorch/components/preprocessing/image_preprocessing/augmentation_transforms.py @@ -0,0 +1,439 @@ +# Copyright 2018 The TensorFlow Authors All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Transforms used in the Augmentation Policies.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random +import numpy as np +# pylint:disable=g-multiple-import +from PIL import ImageOps, ImageEnhance, ImageFilter, Image +# pylint:enable=g-multiple-import + + +IMAGE_SIZE = 32 +# What is the dataset mean and std of the images on the training set +MEANS = [0.49139968, 0.48215841, 0.44653091] +STDS = [0.24703223, 0.24348513, 0.26158784] +PARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted + + +def random_flip(x): + """Flip the input x horizontally with 50% probability.""" + if np.random.rand(1)[0] > 0.5: + return np.fliplr(x) + return x + + +def zero_pad_and_crop(img, amount=4): + """Zero pad by `amount` zero pixels on each side then take a random crop. + Args: + img: numpy image that will be zero padded and cropped. + amount: amount of zeros to pad `img` with horizontally and verically. + Returns: + The cropped zero padded img. The returned numpy array will be of the same + shape as `img`. + """ + padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2, + img.shape[2])) + padded_img[amount:img.shape[0] + amount, amount: + img.shape[1] + amount, :] = img + top = np.random.randint(low=0, high=2 * amount) + left = np.random.randint(low=0, high=2 * amount) + new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :] + return new_img + + +def create_cutout_mask(img_height, img_width, num_channels, size): + """Creates a zero mask used for cutout of shape `img_height` x `img_width`. + Args: + img_height: Height of image cutout mask will be applied to. + img_width: Width of image cutout mask will be applied to. + num_channels: Number of channels in the image. + size: Size of the zeros mask. + Returns: + A mask of shape `img_height` x `img_width` with all ones except for a + square of zeros of shape `size` x `size`. This mask is meant to be + elementwise multiplied with the original image. 
Additionally returns + the `upper_coord` and `lower_coord` which specify where the cutout mask + will be applied. + """ + if size>1: + print("SIZE AND CHANNELS", size, num_channels) + print("IMAGE HEIGTH AND WIDTH", img_height, img_width) + assert img_height == img_width + + # Sample center where cutout mask will be applied + height_loc = np.random.randint(low=0, high=img_height) + width_loc = np.random.randint(low=0, high=img_width) + print("HEIGHT LOC AND WIDTH LOC HEIGTH AND WIDTH", height_loc, width_loc) + + # Determine upper right and lower left corners of patch + upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2)) + lower_coord = (min(img_height, height_loc + size // 2), + min(img_width, width_loc + size // 2)) + print("UPPER AND LOWER COORD", upper_coord, lower_coord) + mask_height = lower_coord[0] - upper_coord[0] + mask_width = lower_coord[1] - upper_coord[1] + print("MASK HEIGTH AND WIDTH", mask_height, mask_width) + assert mask_height > 0 + assert mask_width > 0 + + mask = np.ones((img_height, img_width, num_channels)) + zeros = np.zeros((mask_height, mask_width, num_channels)) + mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = ( + zeros) + + else: + height_loc = np.random.randint(low=0, high=img_height) + width_loc = np.random.randint(low=0, high=img_width) + upper_coord = (height_loc,width_loc) + lower_coord = upper_coord + mask = np.ones((img_height, img_width, num_channels)) + mask[height_loc, width_loc] = 0 + + return mask, upper_coord, lower_coord + + +def cutout_numpy(img, size=16): + """Apply cutout with mask of shape `size` x `size` to `img`. + The cutout operation is from the paper https://arxiv.org/abs/1708.04552. + This operation applies a `size`x`size` mask of zeros to a random location + within `img`. + Args: + img: Numpy image that cutout will be applied to. + size: Height/width of the cutout mask that will be + Returns: + A numpy tensor that is the result of applying the cutout mask to `img`. + """ + img_height, img_width, num_channels = (img.shape[0], img.shape[1], + img.shape[2]) + assert len(img.shape) == 3 + mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size) + return img * mask + + +def float_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled + to level/PARAMETER_MAX. + Returns: + A float that results from scaling `maxval` according to `level`. + """ + return float(level) * maxval / PARAMETER_MAX + + +def int_parameter(level, maxval): + """Helper function to scale `val` between 0 and maxval . + Args: + level: Level of the operation that will be between [0, `PARAMETER_MAX`]. + maxval: Maximum value that the operation can have. This will be scaled + to level/PARAMETER_MAX. + Returns: + An int that results from scaling `maxval` according to `level`. 
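+ For example, with PARAMETER_MAX = 10 (as defined above), int_parameter(5, 30) returns int(5 * 30 / 10) = 15.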
+ """ + return int(level * maxval / PARAMETER_MAX) + + +def pil_wrap(img): + """Convert the `img` numpy tensor to a PIL Image.""" + return Image.fromarray( + np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA') + + +def pil_unwrap(pil_img): + """Converts the PIL img to a numpy array.""" + pic_array = (np.array(pil_img.getdata()).reshape((32, 32, 4)) / 255.0) + i1, i2 = np.where(pic_array[:, :, 3] == 0) + pic_array = (pic_array[:, :, :3] - MEANS) / STDS + pic_array[i1, i2] = [0, 0, 0] + return pic_array + + +def apply_policy(policy, img): + """Apply the `policy` to the numpy `img`. + Args: + policy: A list of tuples with the form (name, probability, level) where + `name` is the name of the augmentation operation to apply, `probability` + is the probability of applying the operation and `level` is what strength + the operation to apply. + img: Numpy image that will have `policy` applied to it. + Returns: + The result of applying `policy` to `img`. + """ + pil_img = img # pil_wrap(img) + + for xform in policy: + assert len(xform) == 3 + name, probability, level = xform + xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(probability, level) + pil_img = xform_fn(pil_img) + return pil_img #pil_unwrap(pil_img) + + +class TransformFunction(object): + """Wraps the Transform function for pretty printing options.""" + + def __init__(self, func, name): + self.f = func + self.name = name + + def __repr__(self): + return '<' + self.name + '>' + + def __call__(self, pil_img): + return self.f(pil_img) + + +class TransformT(object): + """Each instance of this class represents a specific transform.""" + + def __init__(self, name, xform_fn): + self.name = name + self.xform = xform_fn + + def pil_transformer(self, probability, level): + + def return_function(im): + if random.random() < probability: + im = self.xform(im, level) + return im + + name = self.name + '({:.1f},{})'.format(probability, level) + return TransformFunction(return_function, name) + + def do_transform(self, image, level): + f = self.pil_transformer(PARAMETER_MAX, level) + return pil_unwrap(f(pil_wrap(image))) + + +################## Transform Functions ################## +identity = TransformT('identity', lambda pil_img, level: pil_img) +flip_lr = TransformT( + 'FlipLR', + lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT)) +flip_ud = TransformT( + 'FlipUD', + lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM)) +# pylint:disable=g-long-lambda +auto_contrast = TransformT( + 'AutoContrast', + lambda pil_img, level: ImageOps.autocontrast( + pil_img.convert('RGB')).convert('RGBA')) +equalize = TransformT( + 'Equalize', + lambda pil_img, level: ImageOps.equalize( + pil_img.convert('RGB')).convert('RGBA')) +invert = TransformT( + 'Invert', + lambda pil_img, level: ImageOps.invert( + pil_img.convert('RGB')).convert('RGBA')) +# pylint:enable=g-long-lambda +blur = TransformT( + 'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR)) +smooth = TransformT( + 'Smooth', + lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH)) + + +def _rotate_impl(pil_img, level): + """Rotates `pil_img` from -30 to 30 degrees depending on `level`.""" + degrees = int_parameter(level, 30) + if random.random() > 0.5: + degrees = -degrees + return pil_img.rotate(degrees) + + +rotate = TransformT('Rotate', _rotate_impl) + + +def _posterize_impl(pil_img, level): + """Applies PIL Posterize to `pil_img`.""" + level = int_parameter(level, 4) + return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA') + + +posterize = 
TransformT('Posterize', _posterize_impl) + + +def _shear_x_impl(pil_img, level): + """Applies PIL ShearX to `pil_img`. + The ShearX operation shears the image along the horizontal axis with `level` + magnitude. + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + Returns: + A PIL Image that has had ShearX applied to it. + """ + level = float_parameter(level, 0.3) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, level, 0, 0, 1, 0)) + + +shear_x = TransformT('ShearX', _shear_x_impl) + + +def _shear_y_impl(pil_img, level): + """Applies PIL ShearY to `pil_img`. + The ShearY operation shears the image along the vertical axis with `level` + magnitude. + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + Returns: + A PIL Image that has had ShearY applied to it. + """ + level = float_parameter(level, 0.3) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, level, 1, 0)) + + +shear_y = TransformT('ShearY', _shear_y_impl) + + +def _translate_x_impl(pil_img, level): + """Applies PIL TranslateX to `pil_img`. + Translate the image in the horizontal direction by `level` + number of pixels. + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + Returns: + A PIL Image that has had TranslateX applied to it. + """ + level = int_parameter(level, 10) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, level, 0, 1, 0)) + + +translate_x = TransformT('TranslateX', _translate_x_impl) + + +def _translate_y_impl(pil_img, level): + """Applies PIL TranslateY to `pil_img`. + Translate the image in the vertical direction by `level` + number of pixels. + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + Returns: + A PIL Image that has had TranslateY applied to it. + """ + level = int_parameter(level, 10) + if random.random() > 0.5: + level = -level + return pil_img.transform(pil_img.size, Image.AFFINE, (1, 0, 0, 0, 1, level)) + + +translate_y = TransformT('TranslateY', _translate_y_impl) + + +def _crop_impl(pil_img, level, interpolation=Image.BILINEAR): + """Applies a crop to `pil_img` with the size depending on the `level`.""" + cropped = pil_img.crop((level, level, IMAGE_SIZE - level, IMAGE_SIZE - level)) + resized = cropped.resize((IMAGE_SIZE, IMAGE_SIZE), interpolation) + return resized + + +crop_bilinear = TransformT('CropBilinear', _crop_impl) + + +def _solarize_impl(pil_img, level): + """Applies PIL Solarize to `pil_img`. + Invert all pixel values above a threshold determined by `level`. + Args: + pil_img: Image in PIL object. + level: Strength of the operation specified as an Integer from + [0, `PARAMETER_MAX`]. + Returns: + A PIL Image that has had Solarize applied to it.
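+ For example, level = 3 maps to an ImageOps.solarize threshold of 256 - int_parameter(3, 256) = 256 - 76 = 180 (with PARAMETER_MAX = 10).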
+ """ + level = int_parameter(level, 256) + return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA') + + +solarize = TransformT('Solarize', _solarize_impl) + + +def _cutout_pil_impl(pil_img, level): + """Apply cutout to pil_img at the specified level.""" + size = int_parameter(level, 20) + if size <= 0: + return pil_img + img_height, img_width, num_channels = (32, 32, 3) + _, upper_coord, lower_coord = ( + create_cutout_mask(img_height, img_width, num_channels, size)) + pixels = pil_img.load() # create the pixel map + for i in range(upper_coord[0], lower_coord[0]): # for every col: + for j in range(upper_coord[1], lower_coord[1]): # For every row + pixels[i, j] = (125, 122, 113, 0) # set the colour accordingly + return pil_img + +cutout = TransformT('Cutout', _cutout_pil_impl) + + +def _enhancer_impl(enhancer): + """Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL.""" + def impl(pil_img, level): + v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it + return enhancer(pil_img).enhance(v) + return impl + + +color = TransformT('Color', _enhancer_impl(ImageEnhance.Color)) +contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast)) +brightness = TransformT('Brightness', _enhancer_impl( + ImageEnhance.Brightness)) +sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness)) + +ALL_TRANSFORMS = [ + flip_lr, + flip_ud, + auto_contrast, + equalize, + invert, + rotate, + posterize, + crop_bilinear, + solarize, + color, + contrast, + brightness, + sharpness, + shear_x, + shear_y, + translate_x, + translate_y, + cutout, + blur, + smooth +] + +NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS} +TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys() diff --git a/autoPyTorch/components/preprocessing/image_preprocessing/operations.py b/autoPyTorch/components/preprocessing/image_preprocessing/operations.py new file mode 100644 index 000000000..0a1d72add --- /dev/null +++ b/autoPyTorch/components/preprocessing/image_preprocessing/operations.py @@ -0,0 +1,283 @@ +import numpy as np +import math +import random +import os + +from PIL import Image, ImageOps, ImageEnhance + +class Operation(object): + """ + Base class of all operations. + """ + def __init__(self, prob, magnitude): + self.prob = prob + self.magnitude = magnitude + + def __str__(self): + return self.__class__.__name__ + + def __call__(self, image): + raise NotImplementedError("Need to instantiate a subclass of this class!") + +class Equalize(Operation): + """ + Equalize the image histogram. + """ + def __init__(self, prob, magnitude): + super(Equalize, self).__init__(prob, None) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + return ImageOps.equalize(image) + +class Invert(Operation): + """ + Invert the pixels of the image. + """ + def __init__(self, prob, magnitude): + super(Invert, self).__init__(prob, None) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + return ImageOps.invert(image) + +class AutoContrast(Operation): + """ + Maximize the image contrast, by making the darkest pixel black and + the lightest pixel white. + """ + def __init__(self, prob, magnitude): + super(AutoContrast, self).__init__(prob, None) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + return ImageOps.autocontrast(image) + +class Posterize(Operation): + """ + Reduce the number of bits for each pixel magnitude bits. 
+ """ + def __init__(self, prob, magnitude): + super(Posterize, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(4, 8, 10) + bits = int(round(magnitude_range[self.magnitude])) + return ImageOps.posterize(image, bits) + +class Solarize(Operation): + """ + Invert all pixels above a threshold value of magnitude. + """ + def __init__(self, prob, magnitude): + super(Solarize, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(0, 256, 10) + threshold = magnitude_range[self.magnitude] + return ImageOps.solarize(image, threshold) + +class Contrast(Operation): + """ + Control the contrast of the image. + A magnitude=0 gives a gray image, + whereas magnitude=1 gives the original image. + """ + def __init__(self, prob, magnitude): + super(Contrast, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(0.1, 1.9, 10) + factor = magnitude_range[self.magnitude] + enhancer = ImageEnhance.Contrast(image) + return enhancer.enhance(factor) + +class Color(Operation): + """ + Adjust the color balance of the image, + in a manner similar to the controls on a colour TV set. + A magnitude=0 gives a black & white image, + whereas magnitude=1 gives the original image. + """ + def __init__(self, prob, magnitude): + super(Color, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(0.1, 1.9, 10) + factor = magnitude_range[self.magnitude] + enhancer = ImageEnhance.Color(image) + return enhancer.enhance(factor) + +class Brightness(Operation): + """ + Adjust the brightness of the image. + A magnitude=0 gives a black image, + whereas magnitude=1 gives the original image. + """ + def __init__(self, prob, magnitude): + super(Brightness, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(0.1, 1.9, 10) + factor = magnitude_range[self.magnitude] + enhancer = ImageEnhance.Brightness(image) + return enhancer.enhance(factor) + +class Sharpness(Operation): + """ + Adjust the sharpness of the image. + A magnitude=0 gives a blurred image, + whereas magnitude=1 gives the original image. + """ + def __init__(self, prob, magnitude): + super(Sharpness, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(0.1, 1.9, 10) + factor = magnitude_range[self.magnitude] + enhancer = ImageEnhance.Sharpness(image) + return enhancer.enhance(factor) + +class Rotate(Operation): + """ + Rotate the image magnitude degrees. + """ + def __init(self, prob, magnitude): + super(Rotate, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(-30, 30, 10) + degrees = magnitude_range[self.magnitude] + return image.rotate(degrees, expand=False, resample=Image.BICUBIC) + +class TranslateX(Operation): + """ + Translate the image in the horizontal axis + direction by magnitude number of pixels. 
+ """ + def __init__(self, prob, magnitude): + super(TranslateX, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(-15, 15, 10) + pixels = magnitude_range[self.magnitude] + return image.transform(image.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0)) + +class TranslateY(Operation): + """ + Translate the image in the vertical axis + direction by magnitude number of pixels. + """ + def __init__(self, prob, magnitude): + super(TranslateY, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(-15, 15, 10) + pixels = magnitude_range[self.magnitude] + return image.transform(image.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels)) + + +class ShearX(Operation): + """ + Shear image along horizontal axis with rate magnitude. + """ + def __init__(self, prob, magnitude): + super(ShearX, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(-0.3, 0.3, 10) + rate = magnitude_range[self.magnitude] + + w, h = image.size + + phi = math.tan(abs(rate)) + shift_in_pixels = phi * h + matrix_offset = shift_in_pixels + if rate <= 0: + matrix_offset = 0 + phi = -1 * phi + + transform_matrix = (1, phi, -matrix_offset, 0, 1, 0) + + image = image.transform((int(round(w + shift_in_pixels)), h), + Image.AFFINE, + transform_matrix) + + if rate <= 0: + image = image.crop((0, 0, w, h)) + else: + image = image.crop((abs(shift_in_pixels), 0, w + abs(shift_in_pixels), h)) + + return image + +class ShearY(Operation): + """ + Shear image along vertical axis with rate magnitude. + """ + def __init__(self, prob, magnitude): + super(ShearY, self).__init__(prob, magnitude) + + def __call__(self, image): + if random.uniform(0, 1) > self.prob: + return image + else: + magnitude_range = np.linspace(-0.3, 0.3, 10) + rate = magnitude_range[self.magnitude] + + w, h = image.size + + phi = math.tan(abs(rate)) + shift_in_pixels = phi * h + matrix_offset = shift_in_pixels + if rate <= 0: + matrix_offset = 0 + phi = -1 * phi + + transform_matrix = (1, 0, 0, phi, 1, -matrix_offset) + + image = image.transform((w, int(round(h + shift_in_pixels))), + Image.AFFINE, + transform_matrix) + + if rate <= 0: + image = image.crop((0, 0, w, h)) + else: + image = image.crop((0, abs(shift_in_pixels), w, h + abs(shift_in_pixels))) + + return image diff --git a/autoPyTorch/components/preprocessing/image_preprocessing/transforms.py b/autoPyTorch/components/preprocessing/image_preprocessing/transforms.py new file mode 100644 index 000000000..11effac21 --- /dev/null +++ b/autoPyTorch/components/preprocessing/image_preprocessing/transforms.py @@ -0,0 +1,177 @@ +from __future__ import absolute_import + +from torchvision.transforms import * +from .augmentation_transforms import * + +import random +import math +import torch +import numpy as np + +from .operations import * + + +class RandomErasing(object): + """ + Class that performs Random Erasing in Random Erasing Data Augmentation by Zhong et al. + + Args: + probability: The probability that the operation will be performed. 
+ sl: min erasing area + sh: max erasing area + r1: min aspect ratio + mean: erasing value + """ + + def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]): + self.probability = probability + self.sl = sl + self.sh = sh + self.r1 = r1 + self.mean = mean + + def __call__(self, img): + if random.uniform(0, 1) > self.probability: + return img + + for attempt in range(100): + area = img.size()[1] * img.size()[2] + + target_area = random.uniform(self.sl, self.sh) * area + aspect_ratio = random.uniform(self.r1, 1/self.r1) + + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + + if w < img.size()[2] and h < img.size()[1]: + x1 = random.randint(0, img.size()[1] - h) + y1 = random.randint(0, img.size()[2] - w) + if img.size()[0] == 3: + img[0, x1:x1+h, y1:y1+w] = self.mean[0] + img[1, x1:x1+h, y1:y1+w] = self.mean[1] + img[2, x1:x1+h, y1:y1+w] = self.mean[2] + else: + img[0, x1:x1+h, y1:y1+w] = self.mean[1] + return img + + return img + + +class Cutout(object): + """ + Randomly mask out one or more patches from an image. + Args: + n_holes (int): Number of patches to cut out of each image. + length (int): The length (in pixels) of each square patch. + """ + def __init__(self, n_holes, length, probability): + self.n_holes = n_holes + self.length = length + self.probability = probability + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W). + Returns: + Tensor: Image with n_holes of dimension length x length cut out of it. + """ + if random.uniform(0, 1) > self.probability: + return img + + h = img.size(1) + w = img.size(2) + + mask = np.ones((h, w), np.float32) + + for n in range(self.n_holes): + y = np.random.randint(h) + x = np.random.randint(w) + + y1 = int(np.clip(y - self.length / 2, 0, h)) + y2 = int(np.clip(y + self.length / 2, 0, h)) + x1 = int(np.clip(x - self.length / 2, 0, w)) + x2 = int(np.clip(x + self.length / 2, 0, w)) + + mask[y1: y2, x1: x2] = 0. + + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img = img * mask + + return img + + +class AutoAugment(object): + + def __init__(self): + pass + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W). 
+ """ + + # + # ImageNet policies proposed in https://arxiv.org/abs/1805.09501 + # + policies = [ + [('Posterize', 0.4, 8), ('Rotate', 0.6,9)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + [('Posterize', 0.6, 7), ('Posterize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Equalize', 0.4, 4), ('Rotate', 0.8, 8)], + [('Solarize', 0.6, 3), ('Equalize', 0.6, 7)], + [('Posterize', 0.8, 5), ('Equalize', 1.0, 2)], + [('Rotate', 0.2, 3), ('Solarize', 0.6, 8)], + [('Equalize', 0.6, 8), ('Posterize', 0.4, 6)], + [('Rotate', 0.8, 8), ('Color', 0.4, 0)], + [('Rotate', 0.4, 9), ('Equalize', 0.6, 2)], + [('Equalize', 0.0, 7), ('Equalize', 0.8, 8)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Rotate', 0.8, 8), ('Color', 1.0, 2)], + [('Color', 0.8, 8), ('Solarize', 0.8, 7)], + [('Sharpness', 0.4, 7), ('Invert', 0.6, 8)], + [('ShearX', 0.6, 5), ('Equalize', 1.0, 9)], + [('Color', 0.4, 0), ('Equalize', 0.6, 3)], + [('Equalize', 0.4, 7), ('Solarize', 0.2, 4)], + [('Solarize', 0.6, 5), ('AutoContrast', 0.6, 5)], + [('Invert', 0.6, 4), ('Equalize', 1.0, 8)], + [('Color', 0.6, 4), ('Contrast', 1.0, 8)], + [('Equalize', 0.8, 8), ('Equalize', 0.6, 3)], + ] + + policy = random.choice(policies) + + img = apply_policy(policy, img) + + return img.convert('RGB') + + +class FastAutoAugment(object): + + # + # ImageNet policies proposed in https://arxiv.org/abs/1905.00397 + # + + + def __init__(self): + + from .archive import fa_reduced_cifar10 + + self.policies = fa_reduced_cifar10() + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W). + """ + + policy = random.choice(self.policies) + + img = apply_policy(policy, img) + + return img.convert('RGB') diff --git a/autoPyTorch/components/preprocessing/loss_weight_strategies.py b/autoPyTorch/components/preprocessing/loss_weight_strategies.py index 39e9ec4a6..cddce005a 100644 --- a/autoPyTorch/components/preprocessing/loss_weight_strategies.py +++ b/autoPyTorch/components/preprocessing/loss_weight_strategies.py @@ -8,19 +8,27 @@ class LossWeightStrategyWeighted(): def __call__(self, pipeline_config, X, Y): - + counts = np.sum(Y, axis=0) total_weight = Y.shape[0] - weight_per_class = total_weight / Y.shape[1] - weights = (np.ones(Y.shape[1]) * weight_per_class) / np.maximum(counts, 1) + if len(Y.shape) > 1: + weight_per_class = total_weight / Y.shape[1] + weights = (np.ones(Y.shape[1]) * weight_per_class) / np.maximum(counts, 1) + else: + classes, counts = np.unique(Y, axis=0, return_counts=True) + classes, counts = classes[::-1], counts[::-1] + weight_per_class = total_weight / classes.shape[0] + weights = (np.ones(classes.shape[0]) * weight_per_class) / counts + return weights class LossWeightStrategyWeightedBinary(): def __call__(self, pipeline_config, X, Y): - + counts_one = np.sum(Y, axis=0) counts_zero = counts_one + (-Y.shape[0]) weights = counts_zero / np.maximum(counts_one, 1) return weights + diff --git a/autoPyTorch/components/training/image/__init__.py b/autoPyTorch/components/training/image/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/components/training/image/base_training.py b/autoPyTorch/components/training/image/base_training.py new file mode 100644 index 000000000..637a6396f --- /dev/null +++ b/autoPyTorch/components/training/image/base_training.py @@ -0,0 +1,172 @@ +import ConfigSpace +from torch.autograd import Variable + +class BaseTrainingTechnique(): + 
def __init__(self, training_components=None): + """Initialize the training technique. Should be called in a fit Method of a Pipeline node. + + Keyword Arguments: + training_components {dict} -- Maps a names to a training components necessary for this training technique (default: {None}) + """ + + self.training_components = training_components or dict() + + # VIRTUAL + def set_up(self, training_components, pipeline_config, logger): + """Set up the training component + + Arguments: + training_components {dict} -- All training components of training. + pipeline_config {dict} -- Configuration of the Pipeline. + logger {Logger} -- Logger. + """ + + self.logger = logger + + # VIRTUAL + def before_train_batches(self, training_components, log, epoch): + """Function that gets called before the train_batches method of each epoch in training. + + Arguments: + training_components {dict} -- All training components used in training. + log {dict} -- The log of the current epoch. + epoch {int} -- The current epoch of training. + """ + + pass + + # VIRTUAL + def after_train_batches(self, training_components, log, epoch): + """Function that gets called after the train_batches method of each epoch in training. + Is able to stop training by returning True. + + Arguments: + training_components {dict} -- All training components used in training. + log {dict} -- The log of the current epoch. + epoch {int} -- The current epoch of training. + + Returns: + bool -- If training should be stopped. + """ + + return False + + # VIRTUAL + def during_train_batches(self, batch_loss, training_components): + """Function that gets called in the train_batches method of training. + Is able to cancel the current epoch by returning True. + + Arguments: + batch_loss {tensor} -- The batch loss of the current batch. + training_components {dict} -- All training components used in training. + + Returns: + bool -- If the current epoch should be canceled. + """ + + return False + + # VIRTUAL + def select_log(self, logs, training_components): + """Select one log from the list of all epoch logs. + + Arguments: + logs {list} -- A list of log. For each epoch of training there is one entry. + training_components {dict} -- All training components used in training. + + Returns: + log -- The selected log. Return None if undecided. + """ + + return False + + # VIRTUAL + def needs_eval_on_valid_each_epoch(self): + """Specify if the training technique needs the network to be evaluated on the validation set. + + Returns: + bool -- If the network should be evaluated on the validation set. + """ + + return False + + # VIRTUAL + def needs_eval_on_train_each_epoch(self): + """Specify if the training technique needs the network to be evaluated on the training set. + + Returns: + bool -- If the network should be evaluated on the training set. + """ + + + return False + + # VIRTUAL + @staticmethod + def get_pipeline_config_options(): + """Return a list of ConfigOption used for this training technique. + + Returns: + list -- A list of ConfigOptions. + """ + + return [] + + +class BaseBatchLossComputationTechnique(): + + # VIRTUAL + def set_up(self, pipeline_config, hyperparameter_config, logger): + """Initialize the batch loss computation technique. + + Arguments: + pipeline_config {dict} -- The configuration of the pipeline. + hyperparameter_config {dict} -- The hyperparameter config sampled by BOHB. + logger {Logger} -- Logger. 
+ """ + self.logger = logger + + # VIRTUAL + def prepare_data(self, X_batch, y_batch): + """Method that gets called, before batch is but into network. + + Arguments: + X_batch {tensor} -- The features of the batch. + y_batch {tensor} -- The targets of the batch. + """ + + return X_batch, {'y_batch' : y_batch} + + # VIRTUAL + def criterion(self, y_batch): + return lambda criterion, pred: criterion(pred, y_batch) + + # VIRTUAL + def evaluate(self, metric, y_pred, y_batch): + return metric(y_pred, y_batch) + + + # VIRTUAL + @staticmethod + def get_pipeline_config_options(): + """A list of ConfigOptions used for this technique. + + Returns: + list -- A list of ConfigOptions for this technique. + """ + + return [] + + # VIRTUAL + @staticmethod + def get_hyperparameter_search_space(**pipeline_config): + """Get the hyperparameter config space for this technique. + + Returns: + ConfigurationSpace -- The hyperparameter config space for this technique + """ + + return ConfigSpace.ConfigurationSpace() + + + \ No newline at end of file diff --git a/autoPyTorch/components/training/image/budget_types.py b/autoPyTorch/components/training/image/budget_types.py new file mode 100644 index 000000000..ee7becba6 --- /dev/null +++ b/autoPyTorch/components/training/image/budget_types.py @@ -0,0 +1,50 @@ +from autoPyTorch.components.training.image.base_training import BaseTrainingTechnique +import time + +class BudgetTypeTime(BaseTrainingTechnique): + default_min_budget = 120 + default_max_budget = 6000 + compensate = 10 # will be modified by cv + + # OVERRIDE + def set_up(self, training_components, pipeline_config, logger): + super(BudgetTypeTime, self).set_up(training_components, pipeline_config, logger) + self.end_time = training_components["budget"] - self.compensate + training_components["fit_start_time"] + self.start_time = time.time() + + if self.start_time >= self.end_time: + raise Exception("Budget exhausted before training started") + + # OVERRIDE + def during_train_batches(self, batch_loss, training_components): + return time.time() >= self.end_time + + # OVERRIDE + def after_train_batches(self, training_components, log, epoch): + elapsed = time.time() - self.start_time + training_components["network"].budget_trained = elapsed + self.logger.debug("Budget used: " + str(elapsed) + "/" + str(self.end_time - self.start_time)) + + if time.time() >= self.end_time: + self.logger.debug("Budget exhausted!") + return True + return False + +class BudgetTypeEpochs(BaseTrainingTechnique): + default_min_budget = 5 + default_max_budget = 150 + + # OVERRIDE + def set_up(self, training_components, pipeline_config, logger): + super(BudgetTypeEpochs, self).set_up(training_components, pipeline_config, logger) + self.target = training_components["budget"] + + # OVERRIDE + def after_train_batches(self, training_components, log, epoch): + training_components["network"].budget_trained = epoch + self.logger.debug("Budget used: " + str(epoch) + "/" + str(self.target)) + + if epoch >= self.target: + self.logger.debug("Budget exhausted!") + return True + return False diff --git a/autoPyTorch/components/training/image/checkpoints/__init__.py b/autoPyTorch/components/training/image/checkpoints/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/components/training/image/checkpoints/load_specific.py b/autoPyTorch/components/training/image/checkpoints/load_specific.py new file mode 100644 index 000000000..493d71119 --- /dev/null +++ b/autoPyTorch/components/training/image/checkpoints/load_specific.py 
@@ -0,0 +1,58 @@ + +import os +import math +import torch +import torch.nn as nn + + +import logging + + +def load_model(model, checkpoint): + + if checkpoint is None: + return model + + pretrained_state = checkpoint['state'] + model_state = model.state_dict() + + pretrained_state = { k:v for k,v in pretrained_state.items() if k in model_state and v.size() == model_state[k].size() } + logging.getLogger('autonet').debug('=> Resuming model using ' + str(len(pretrained_state.keys())) + '/' + str(len(model_state.keys())) + ' parameters') + model_state.update(pretrained_state) + model.load_state_dict(model_state) + + return model + +# def load_optimizer(optimizer, checkpoint, device): + +# if checkpoint is None: +# return optimizer + +# opti_state = optimizer.state_dict() +# pretrained_state = checkpoint['optimizer'] + +# logging.getLogger('autonet').debug(str(len(pretrained_state['state']))) +# logging.getLogger('autonet').debug(str(len(opti_state['param_groups'][0]['params']))) +# logging.getLogger('autonet').debug(str(len(pretrained_state['param_groups'][0]['params']))) +# logging.getLogger('autonet').debug(str(set(pretrained_state['param_groups'][0]['params']).intersection(set(opti_state['param_groups'][0]['params'])))) + + +# pretrained_state = {k: pretrained_state[k] for state in opti_state.items() for k, v in enumerate(state) if state in pretrained_state and k in pretrained_state[state] and v.size() == opti_state[state][k].size()} +# logging.getLogger('autonet').debug('=> Resuming optimizer using ' + str(len(pretrained_state.keys())) + '/' + str(len(opti_state.keys()))) +# opti_state.update(pretrained_state) +# optimizer.load_state_dict(opti_state) + +# for state in optimizer.state.values(): +# for k, v in state.items(): +# if isinstance(v, torch.Tensor): +# state[k] = v.to(device) +# return optimizer + +# def load_scheduler(scheduler, checkpoint): + +# if checkpoint is None: +# return scheduler + +# loaded_scheduler = checkpoint['scheduler'] +# loaded_scheduler.optimizer = scheduler.optimizer +# return loaded_scheduler \ No newline at end of file diff --git a/autoPyTorch/components/training/image/checkpoints/save_load.py b/autoPyTorch/components/training/image/checkpoints/save_load.py new file mode 100644 index 000000000..014662f1c --- /dev/null +++ b/autoPyTorch/components/training/image/checkpoints/save_load.py @@ -0,0 +1,39 @@ +import torch +import os + +import logging + + +def get_checkpoint_name(config_id, budget): + return 'checkpoint_' + str(config_id) + '_Budget_' + str(int(budget)) + '.pt' + +def get_checkpoint_dir(working_directory): + return os.path.join(working_directory, 'checkpoints') + +def save_checkpoint(path, config_id, budget, model, optimizer, scheduler): + + name = get_checkpoint_name(config_id, budget) + os.makedirs(path, exist_ok=True) + + path = os.path.join(path, name) + + torch.save({ + 'state': model.state_dict(), + }, open(path, 'wb')) + + logging.getLogger('autonet').debug('=> Model {} saved to {}'.format(str(type(model)), path)) + return path + + +def load_checkpoint(path, config_id, budget): + name = get_checkpoint_name(config_id, budget) + + path = os.path.join(path, name) + if not os.path.exists(path): + return None + + logging.getLogger('autonet').debug('=> Loading checkpoint ' + path) + checkpoint = torch.load(path) + return checkpoint + + diff --git a/autoPyTorch/components/training/image/early_stopping.py b/autoPyTorch/components/training/image/early_stopping.py new file mode 100644 index 000000000..51f5f7678 --- /dev/null +++ 
b/autoPyTorch/components/training/image/early_stopping.py @@ -0,0 +1,84 @@ +from autoPyTorch.components.training.image.base_training import BaseTrainingTechnique +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool + +class EarlyStopping(BaseTrainingTechnique): + """ Stop training when there is no improvement on the validation set for a specified number of epochs. + Is able to take a snapshot of the parameters, where the performance of the validation set is best. + There is no further split of the data. Therefore the validation performance reported to BOHB will become an optimistic estimator. + """ + + # OVERRIDE + def set_up(self, training_components, pipeline_config, logger): + super(EarlyStopping, self).set_up(training_components, pipeline_config, logger) + self.reset_parameters = pipeline_config["early_stopping_reset_parameters"] + self.minimize = pipeline_config["minimize"] + self.patience = pipeline_config["early_stopping_patience"] + + # does not work with e.g. cosine anealing with warm restarts + if "lr_scheduler" in training_components and not training_components["lr_scheduler"].allows_early_stopping: + self.patience = float("inf") + + # initialize current best performance to +/- infinity + if training_components["network"].current_best_epoch_performance is None: + training_components["network"].current_best_epoch_performance = float("inf") + if not self.minimize: + training_components["network"].current_best_epoch_performance = -float("inf") + + self.logger.debug("Using Early stopping with patience: " + str(self.patience)) + self.logger.debug("Reset Parameters to parameters with best validation performance: " + str(self.reset_parameters)) + + # OVERRIDE + def after_train_batches(self, training_components, log, epoch): + if "val_" + training_components["train_metric_name"] not in log: + if self.patience < float("inf"): + self.logger.debug("No Early stopping because no validation set performance available") + return False + if self.reset_parameters and ("lr_scheduler" not in training_components or not training_components["lr_scheduler"].snapshot_before_restart): + log["best_parameters"] = False + current_performance = log["val_" + training_components["train_metric_name"]] + + # new best performance + if ((self.minimize and current_performance < training_components["network"].current_best_epoch_performance) or + (not self.minimize and current_performance > training_components["network"].current_best_epoch_performance)): + training_components["network"].num_epochs_no_progress = 0 + training_components["network"].current_best_epoch_performance = current_performance + self.logger.debug("New best performance!") + + if self.reset_parameters and ("lr_scheduler" not in training_components or not training_components["lr_scheduler"].snapshot_before_restart): + self.logger.debug("Early stopping takes snapshot of current parameters") + log["best_parameters"] = True + training_components["network"].snapshot() + + # do early stopping + elif training_components["network"].num_epochs_no_progress > self.patience: + self.logger.debug("Early stopping patience exhausted. 
Stopping Early!") + training_components["network"].stopped_early = True + return True + + # no improvement + else: + self.logger.debug("No improvement") + training_components["network"].num_epochs_no_progress += 1 + return False + + # OVERRIDE + def select_log(self, logs, training_components): + # select the log where a snapshot has been taken + if self.reset_parameters and ("lr_scheduler" not in training_components or not training_components["lr_scheduler"].snapshot_before_restart): + self.logger.debug("Using logs of parameters with best validation performance") + logs = [log for log in logs if log["best_parameters"]] or logs + logs = logs[-1] + return logs + return False + + def needs_eval_on_valid_each_epoch(self): + return self.reset_parameters or self.patience < float("inf") + + # OVERRIDE + @staticmethod + def get_pipeline_config_options(): + options = [ + ConfigOption("early_stopping_patience", default=float("inf"), type=float), + ConfigOption("early_stopping_reset_parameters", default=False, type=to_bool) + ] + return options diff --git a/autoPyTorch/components/training/image/lr_scheduling.py b/autoPyTorch/components/training/image/lr_scheduling.py new file mode 100644 index 000000000..e207a2665 --- /dev/null +++ b/autoPyTorch/components/training/image/lr_scheduling.py @@ -0,0 +1,39 @@ +from autoPyTorch.components.training.image.base_training import BaseTrainingTechnique + +class LrScheduling(BaseTrainingTechnique): + """Schedule the learning rate with given learning rate scheduler. + The learning rate scheduler is usually set in a LrSchedulerSelector pipeline node. + """ + + # OVERRIDE + def after_train_batches(self, training_components, log, epoch): + + # do one step of lr scheduling + if callable(getattr(training_components["lr_scheduler"], "get_lr", None)): + log['lr'] = training_components["lr_scheduler"].get_lr()[0] + try: + training_components["lr_scheduler"].step(epoch=(epoch + 1), metrics=log['loss']) + except: + training_components["lr_scheduler"].step(epoch=(epoch + 1)) + self.logger.debug("Perform learning rate scheduling") + + # check if lr scheduler has converged, if possible + if not training_components["lr_scheduler"].snapshot_before_restart: + return False + training_components["lr_scheduler"].get_lr() + log["lr_scheduler_converged"] = False + if training_components["lr_scheduler"].restarted_at == (epoch + 1): + self.logger.debug("Learning rate scheduler converged. Taking Snapshot of models parameters.") + training_components["network"].snapshot() + log["lr_scheduler_converged"] = True + return False + + def select_log(self, logs, training_components): + + # select the log where the lr scheduler has converged, if possible. 
+ if training_components["lr_scheduler"].snapshot_before_restart: + self.logger.debug("Using logs where lr scheduler converged") + logs = [log for log in logs if log["lr_scheduler_converged"]] or logs + logs = logs[-1] + return logs + return False diff --git a/autoPyTorch/components/training/image/mixup.py b/autoPyTorch/components/training/image/mixup.py new file mode 100644 index 000000000..7fb6d3309 --- /dev/null +++ b/autoPyTorch/components/training/image/mixup.py @@ -0,0 +1,32 @@ +from autoPyTorch.components.training.image.base_training import BaseBatchLossComputationTechnique +import numpy as np +from torch.autograd import Variable +import ConfigSpace +import torch + +class Mixup(BaseBatchLossComputationTechnique): + def set_up(self, pipeline_config, hyperparameter_config, logger): + super(Mixup, self).set_up(pipeline_config, hyperparameter_config, logger) + self.alpha = hyperparameter_config["alpha"] + + def prepare_data(self, x, y): + + lam = np.random.beta(self.alpha, self.alpha) if self.alpha > 0. else 1. + batch_size = x.size()[0] + index = torch.randperm(batch_size).cuda() if x.is_cuda else torch.randperm(batch_size) + + mixed_x = lam * x + (1 - lam) * x[index, :] + y_a, y_b = y, y[index] + return mixed_x, { 'y_a': y_a, 'y_b': y_b, 'lam' : lam } + + def criterion(self, y_a, y_b, lam): + return lambda criterion, pred: lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b) + + def evaluate(self, metric, y_pred, y_a, y_b, lam): + return lam * metric(y_pred, y_a) + (1 - lam) * metric(y_pred, y_b) + + @staticmethod + def get_hyperparameter_search_space(**pipeline_config): + cs = ConfigSpace.ConfigurationSpace() + cs.add_hyperparameter(ConfigSpace.hyperparameters.UniformFloatHyperparameter("alpha", lower=0, upper=1, default_value=1)) + return cs diff --git a/autoPyTorch/components/training/image/trainer.py b/autoPyTorch/components/training/image/trainer.py new file mode 100644 index 000000000..c8653752d --- /dev/null +++ b/autoPyTorch/components/training/image/trainer.py @@ -0,0 +1,251 @@ +import time +import os +import torch +import torch.nn as nn + +import random +from torch.autograd import Variable +from .checkpoints.save_load import save_checkpoint + +# from util.transforms import mixup_data, mixup_criterion +# from checkpoints import save_checkpoint + +class Trainer(object): + def __init__(self, loss_computation, model, criterion, budget, optimizer, scheduler, budget_type, device, images_to_plot=0, checkpoint_path=None, config_id=None): + self.checkpoint_path = checkpoint_path + self.config_id = config_id + + self.scheduler = scheduler + # if self.scheduler and not hasattr(self.scheduler, 'cumulative_time'): + # self.scheduler.cumulative_time = 0 + self.optimizer = optimizer + self.device = device + + self.budget = budget + self.loss_computation = loss_computation + + self.images_plot_count = images_to_plot + + self.budget_type = budget_type + self.cumulative_time = 0 + + self.train_loss_sum = 0 + self.train_iterations = 0 + + self.latest_checkpoint = None + + try: + if torch.cuda.device_count() > 1: + model = nn.DataParallel(model) + self.model = model.to(self.device) + except: + print("CUDA unavailable, continue using CPU.") + self.model = model.to("cpu") + + try: + self.criterion = criterion.to(self.device) + except: + print("No criterion specified.") + self.criterion = None + + def train(self, epoch, train_loader, metrics): + ''' + Trains the model for a single epoch + ''' + + # train_size = int(0.9 * len(train_loader.dataset.train_data) / self.config.batch_size) + 
loss_sum = 0.0 + N = 0 + + # print('\33[1m==> Training epoch # {}\033[0m'.format(str(epoch))) + + + classified = [] + misclassified = [] + + self.model.train() + + budget_exceeded = False + metric_results = [0] * len(metrics) + start_time = time.time() + for step, (data, targets) in enumerate(train_loader): + + # import matplotlib.pyplot as plt + # img = plt.imshow(data.numpy()[0,1,:]) + # plt.show() + + # images += list(data.numpy()) + # print('Data:', data.size(), ' - Label:', targets.size()) + + data = data.to(self.device) + targets = targets.to(self.device) + + data, criterion_kwargs = self.loss_computation.prepare_data(data, targets) + batch_size = data.size(0) + + outputs = self.model(data) + loss_func = self.loss_computation.criterion(**criterion_kwargs) + loss = loss_func(self.criterion, outputs) + + self.optimizer.zero_grad() + loss.backward() + self.optimizer.step() + + # print('Train:', ' '.join(str(outputs).split('\n')[0:2])) + + if self.images_plot_count > 0: + with torch.no_grad(): + _, pred = outputs.topk(1, 1, True, True) + pred = pred.t() + correct = pred.eq(targets.view(1, -1).expand_as(pred)).cpu().numpy()[0] + data = data.cpu().numpy() + classified += list(data[correct.astype(bool)]) + misclassified += list(data[(1-correct).astype(bool)]) + if len(classified) > self.images_plot_count: + classified = random.sample(classified, self.images_plot_count) + if len(misclassified) > self.images_plot_count: + misclassified = random.sample(misclassified, self.images_plot_count) + + # self.scheduler.cumulative_time += delta_time + # self.scheduler.last_step = self.scheduler.cumulative_time - delta_time - 1e-10 + + tmp = time.time() + + with torch.no_grad(): + for i, metric in enumerate(metrics): + metric_results[i] += self.loss_computation.evaluate(metric, outputs, **criterion_kwargs) * batch_size + + loss_sum += loss.item() * batch_size + N += batch_size + + #print('Update', (metric_results[0] / N), 'loss', (loss_sum / N), 'lr', self.optimizer.param_groups[0]['lr']) + + if self.budget_type == 'time' and self.cumulative_time + (time.time() - start_time) >= self.budget: + # print(' * Stopping at Epoch: [%d][%d/%d] for a budget of %.3f s' % (epoch, step + 1, train_size, self.config.budget)) + budget_exceeded = True + break + + + if self.images_plot_count > 0: + import tensorboard_logger as tl + tl.log_images('Train_Classified/Image', classified, step=epoch) + tl.log_images('Train_Misclassified/Image', misclassified, step=epoch) + + if self.checkpoint_path and self.scheduler.snapshot_before_restart and self.scheduler.needs_checkpoint(): + self.latest_checkpoint = save_checkpoint(self.checkpoint_path, self.config_id, self.budget, self.model, self.optimizer, self.scheduler) + + try: + self.scheduler.step(epoch=epoch) + except: + self.scheduler.step(metrics=loss_sum / N, epoch=epoch) + + self.cumulative_time += (time.time() - start_time) + #print('LR', self.optimizer.param_groups[0]['lr'], 'Update', (metric_results[0] / N), 'loss', (loss_sum / N)) + + return [res / N for res in metric_results], loss_sum / N, budget_exceeded + + + def evaluate(self, test_loader, metrics, epoch=0): + + N = 0 + metric_results = [0] * len(metrics) + + classified = [] + misclassified = [] + + self.model.eval() + + with torch.no_grad(): + for step, (data, targets) in enumerate(test_loader): + + # import matplotlib.pyplot as plt + # img = plt.imshow(data.numpy()[0,1,:]) + # plt.show() + + try: + data = data.to(self.device) + targets = targets.to(self.device) + except: + data = data.to("cpu") + targets = 
targets.to("cpu") + + batch_size = data.size(0) + + outputs = self.model(data) + + if self.images_plot_count > 0: + _, pred = outputs.topk(1, 1, True, True) + pred = pred.t() + correct = pred.eq(targets.view(1, -1).expand_as(pred)).cpu().numpy()[0] + data = data.cpu().numpy() + classified += list(data[correct.astype(bool)]) + misclassified += list(data[(1-correct).astype(bool)]) + if len(classified) > self.images_plot_count: + classified = random.sample(classified, self.images_plot_count) + if len(misclassified) > self.images_plot_count: + misclassified = random.sample(misclassified, self.images_plot_count) + + # print('Valid:', ' '.join(str(outputs).split('\n')[0:2])) + # print('Shape:', outputs.shape, 'Sums', str(outputs.cpu().numpy().sum(1)).replace('\n', '')) + + for i, metric in enumerate(metrics): + metric_results[i] += metric(outputs.data, targets.data) * batch_size + + N += batch_size + + if self.images_plot_count > 0: + import tensorboard_logger as tl + tl.log_images('Valid_Classified/Image', classified, step=epoch) + tl.log_images('Valid_Misclassified/Image', misclassified, step=epoch) + + self.model.train() + + return [res / N for res in metric_results] + + + def class_to_probability_mapping(self, test_loader): + + N = 0 + + import numpy as np + import torch.nn as nn + + probs = None; + class_to_index = dict() + target_count = [] + + self.model.eval() + + with torch.no_grad(): + for i, (data, targets) in enumerate(test_loader): + + data = data.to(self.device) + targets = targets.to(self.device) + + batch_size = data.size(0) + + outputs = self.model(data) + + for i, output in enumerate(outputs): + target = targets[i].cpu().item() + np_output = output.cpu().numpy() + if target not in class_to_index: + if probs is None: + probs = np.array([np_output]) + else: + probs = np.vstack((probs, np_output)) + class_to_index[target] = probs.shape[0] - 1 + target_count.append(0) + else: + probs[class_to_index[target]] = probs[class_to_index[target]] + np_output + + target_count[class_to_index[target]] += 1 + + N += batch_size + + probs = probs / np.array(target_count)[:, None] #np.max(probs, axis=1)[:, None] + probs = torch.from_numpy(probs) + # probs = nn.Softmax(1)(probs) + + self.model.train() + return probs, class_to_index diff --git a/autoPyTorch/components/training/trainer.py b/autoPyTorch/components/training/trainer.py index a1b6af4dc..1f8cbf413 100644 --- a/autoPyTorch/components/training/trainer.py +++ b/autoPyTorch/components/training/trainer.py @@ -163,4 +163,3 @@ def compute_metrics(self, outputs_data, targets_data): outputs_data = np.vstack(outputs_data) targets_data = np.vstack(targets_data) return [metric(outputs_data, targets_data) for metric in self.metrics] - \ No newline at end of file diff --git a/autoPyTorch/core/api.py b/autoPyTorch/core/api.py index ab64a3ae2..92c8d1da0 100644 --- a/autoPyTorch/core/api.py +++ b/autoPyTorch/core/api.py @@ -17,6 +17,8 @@ from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector from autoPyTorch.pipeline.nodes.optimization_algorithm import OptimizationAlgorithm from autoPyTorch.pipeline.nodes.create_dataset_info import CreateDatasetInfo +from autoPyTorch.pipeline.nodes.network_selector import NetworkSelector +from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo from autoPyTorch.utils.config.config_file_parser import ConfigFileParser @@ -27,7 +29,7 @@ class AutoNet(): def __init__(self, config_preset="medium_cs", pipeline=None, **autonet_config): """Superclass for all AutoNet 
variations, that specifies the API of AutoNet. - + Keyword Arguments: pipeline {Pipeline} -- Define your own Autonet Pipeline (default: {None}) **autonet_config -- Configure AutoNet for your needs. You can also configure AutoNet in fit(). Call print_help() for more info. @@ -60,7 +62,6 @@ def print_help(self): print() config_file_parser.print_help(self.base_config) - def get_current_autonet_config(self): """Return the current AutoNet configuration @@ -140,10 +141,13 @@ def fit(self, X_train, Y_train, X_valid=None, Y_valid=None, refit=True, **autone self.fit_result = self.pipeline.fit_pipeline(pipeline_config=self.autonet_config, X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid) - self.dataset_info = self.pipeline[CreateDatasetInfo.get_name()].fit_output["dataset_info"] + try: + self.dataset_info = self.pipeline[CreateDatasetInfo.get_name()].fit_output["dataset_info"] + except: + self.dataset_info = None self.pipeline.clean() - if not self.fit_result["optimized_hyperparameter_config"]: + if "optimized_hyperparameter_config" not in self.fit_result.keys() or not self.fit_result["optimized_hyperparameter_config"]: # MODIFY raise RuntimeError("No models fit during training, please retry with a larger max_runtime.") if (refit): @@ -187,7 +191,11 @@ def refit(self, X_train, Y_train, X_valid=None, Y_valid=None, hyperparameter_con refit_data = {'hyperparameter_config': hyperparameter_config, 'budget': budget, 'rescore': rescore} - + + autonet_config = copy.deepcopy(autonet_config) + autonet_config['cv_splits'] = 1 + autonet_config['increase_number_of_trained_datasets'] = False #if training multiple datasets else ignored + return self.pipeline.fit_pipeline(pipeline_config=autonet_config, refit=refit_data, X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid) @@ -206,13 +214,19 @@ def predict(self, X, return_probabilities=False): # run predict pipeline X, = self.check_data_array_types(X) - autonet_config = self.autonet_config or self.base_config + autonet_config = self.get_current_autonet_config() + Y_pred = self.pipeline.predict_pipeline(pipeline_config=autonet_config, X=X)['Y'] - # reverse one hot encoding - OHE = self.pipeline[OneHotEncoding.get_name()] - result = OHE.reverse_transform_y(Y_pred, OHE.fit_output['y_one_hot_encoder']) - return result if not return_probabilities else (result, Y_pred) + # reverse one hot encoding + if OneHotEncoding.get_name() in self.pipeline: + OHE = self.pipeline[OneHotEncoding.get_name()] + result = OHE.reverse_transform_y(Y_pred, OHE.fit_output['y_one_hot_encoder']) + return result if not return_probabilities else (result, Y_pred) + else: + result = dict() + result['Y'] = Y_pred + return result if not return_probabilities else (result, Y_pred) def score(self, X_test, Y_test, return_loss_value=False): """Calculate the sore on test data using the specified optimize_metric @@ -225,20 +239,90 @@ def score(self, X_test, Y_test, return_loss_value=False): score -- The score for the test data. 
""" - # run predict pipeline + # Update config if needed X_test, Y_test = self.check_data_array_types(X_test, Y_test) - autonet_config = self.autonet_config or self.base_config - self.pipeline.predict_pipeline(pipeline_config=autonet_config, X=X_test) - Y_pred = self.pipeline[OptimizationAlgorithm.get_name()].predict_output['Y'] + autonet_config = self.get_current_autonet_config() + + res = self.pipeline.predict_pipeline(pipeline_config=autonet_config, X=X_test) + if 'score' in res: + # in case of default dataset like CIFAR10 - the pipeline will compute the score of the according pytorch test set + return res['score'] + Y_pred = res['Y'] + # run predict pipeline + #self.pipeline.predict_pipeline(pipeline_config=autonet_config, X=X_test) + #Y_pred = self.pipeline[OptimizationAlgorithm.get_name()].predict_output['Y'] + # one hot encode Y - OHE = self.pipeline[OneHotEncoding.get_name()] - Y_test = OHE.transform_y(Y_test, OHE.fit_output['y_one_hot_encoder']) + try: + OHE = self.pipeline[OneHotEncoding.get_name()] + Y_test = OHE.transform_y(Y_test, OHE.fit_output['y_one_hot_encoder']) + except: + print("No one-hot encodig possible. Continuing without.") + pass metric = self.pipeline[MetricSelector.get_name()].fit_output['optimize_metric'] + if return_loss_value: return metric.get_loss_value(Y_pred, Y_test) - return metric(Y_pred, Y_test) + return metric(torch.from_numpy(Y_test.astype(np.float32)), torch.from_numpy(Y_pred.astype(np.float32))) + + def get_pytorch_model(self): + """Returns a pytorch sequential model of the current incumbent configuration + + Arguments: + + Returns: + model -- PyTorch sequential model of the current incumbent configuration + """ + if NetworkSelector.get_name() in self.pipeline: + return self.pipeline[NetworkSelector.get_name()].fit_output["network"].layers + else: + return self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"].layers + + def initialize_from_checkpoint(self, hyperparameter_config, checkpoint, in_features, out_features, final_activation=None): + """Returns a pytorch sequential model from a state dict and a hyperparamter config. 
+ 
+ Arguments: 
+ hyperparameter_config: dict or path to a json file with the output as returned by the .fit method 
+ checkpoint: path to a checkpoint file written during training 
+ in_features: array-like object, channels first 
+ out_features: int, number of classes 
+ final_activation: 
+ 
+ Returns: 
+ PyTorch Sequential model 
+ 
+ """ 
+ # load state dict 
+ state_dict = torch.load(checkpoint, map_location=torch.device('cpu'))["state"] 
+ 
+ # read config file 
+ if type(hyperparameter_config)==dict: 
+ config = hyperparameter_config 
+ else: 
+ with open(hyperparameter_config, 'r') as file: 
+ config = json.load(file)[1] 
+ 
+ # get model 
+ network_type = config['NetworkSelectorDatasetInfo:network'] 
+ network_type = self.pipeline[NetworkSelectorDatasetInfo.get_name()].networks[network_type] 
+ model = network_type(config=config, 
+ in_features=in_features, 
+ out_features=out_features, 
+ final_activation=final_activation) 
+ 
+ # Apply state dict 
+ pretrained_state = state_dict 
+ model_state = model.state_dict() 
+ 
+ pretrained_state = { k:v for k,v in pretrained_state.items() if k in model_state and v.size() == model_state[k].size() } 
+ model_state.update(pretrained_state) 
+ model.load_state_dict(model_state) 
+ 
+ # Add to pipeline 
+ self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"] = model 
+ 
+ return model.layers 
 
 def check_data_array_types(self, *arrays): 
 result = [] 
diff --git a/autoPyTorch/core/autonet_classes/__init__.py b/autoPyTorch/core/autonet_classes/__init__.py 
index 1979c846d..96e55e5e5 100644 
--- a/autoPyTorch/core/autonet_classes/__init__.py 
+++ b/autoPyTorch/core/autonet_classes/__init__.py 
@@ -1,3 +1,5 @@ 
 from autoPyTorch.core.autonet_classes.autonet_feature_classification import AutoNetClassification 
 from autoPyTorch.core.autonet_classes.autonet_feature_regression import AutoNetRegression 
-from autoPyTorch.core.autonet_classes.autonet_feature_multilabel import AutoNetMultilabel 
\ No newline at end of file 
+from autoPyTorch.core.autonet_classes.autonet_feature_multilabel import AutoNetMultilabel 
+from autoPyTorch.core.autonet_classes.autonet_image_classification import AutoNetImageClassification 
+from autoPyTorch.core.autonet_classes.autonet_image_classification_multiple_datasets import AutoNetImageClassificationMultipleDatasets 
diff --git a/autoPyTorch/core/autonet_classes/autonet_feature_data.py b/autoPyTorch/core/autonet_classes/autonet_feature_data.py 
index 8665e2cb3..c844d4807 100644 
--- a/autoPyTorch/core/autonet_classes/autonet_feature_data.py 
+++ b/autoPyTorch/core/autonet_classes/autonet_feature_data.py 
@@ -108,9 +108,9 @@ def _apply_default_pipeline_settings(pipeline): 
 from autoPyTorch.components.networks.feature import MlpNet, ResNet, ShapedMlpNet, ShapedResNet 
 from autoPyTorch.components.networks.initialization import SimpleInitializer, SparseInitialization 
- from autoPyTorch.components.optimizer.optimizer import AdamOptimizer, SgdOptimizer 
+ from autoPyTorch.components.optimizer.optimizer import AdamOptimizer, AdamWOptimizer, SgdOptimizer, RMSpropOptimizer 
 from autoPyTorch.components.lr_scheduler.lr_schedulers import SchedulerCosineAnnealingWithRestartsLR, SchedulerNone, \ 
- SchedulerCyclicLR, SchedulerExponentialLR, SchedulerReduceLROnPlateau, SchedulerReduceLROnPlateau, SchedulerStepLR 
+ SchedulerCyclicLR, SchedulerExponentialLR, SchedulerReduceLROnPlateau, SchedulerReduceLROnPlateau, SchedulerStepLR, SchedulerAdaptiveLR, SchedulerAlternatingCosineLR 
 from autoPyTorch.components.networks.feature import LearnedEntityEmbedding 
 from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler 
@@ -150,15 +150,19 @@ def
_apply_default_pipeline_settings(pipeline): opt_selector = pipeline[OptimizerSelector.get_name()] opt_selector.add_optimizer('adam', AdamOptimizer) + opt_selector.add_optimizer('adamw', AdamWOptimizer) opt_selector.add_optimizer('sgd', SgdOptimizer) + opt_selector.add_optimizer('rmsprop', RMSpropOptimizer) lr_selector = pipeline[LearningrateSchedulerSelector.get_name()] - lr_selector.add_lr_scheduler('cosine_annealing', SchedulerCosineAnnealingWithRestartsLR) - lr_selector.add_lr_scheduler('cyclic', SchedulerCyclicLR) - lr_selector.add_lr_scheduler('exponential', SchedulerExponentialLR) - lr_selector.add_lr_scheduler('step', SchedulerStepLR) - lr_selector.add_lr_scheduler('plateau', SchedulerReduceLROnPlateau) - lr_selector.add_lr_scheduler('none', SchedulerNone) + lr_selector.add_lr_scheduler('cosine_annealing', SchedulerCosineAnnealingWithRestartsLR) + lr_selector.add_lr_scheduler('cyclic', SchedulerCyclicLR) + lr_selector.add_lr_scheduler('exponential', SchedulerExponentialLR) + lr_selector.add_lr_scheduler('step', SchedulerStepLR) + lr_selector.add_lr_scheduler('adapt', SchedulerAdaptiveLR) + lr_selector.add_lr_scheduler('plateau', SchedulerReduceLROnPlateau) + lr_selector.add_lr_scheduler('alternating_cosine', SchedulerAlternatingCosineLR) + lr_selector.add_lr_scheduler('none', SchedulerNone) train_node = pipeline[TrainNode.get_name()] train_node.add_training_technique("early_stopping", EarlyStopping) diff --git a/autoPyTorch/core/autonet_classes/autonet_image_classification.py b/autoPyTorch/core/autonet_classes/autonet_image_classification.py new file mode 100644 index 000000000..b9be0ec1a --- /dev/null +++ b/autoPyTorch/core/autonet_classes/autonet_image_classification.py @@ -0,0 +1,34 @@ +from autoPyTorch.core.autonet_classes.autonet_image_data import AutoNetImageData + + +class AutoNetImageClassification(AutoNetImageData): + preset_folder_name = "image_classification" + + @staticmethod + def _apply_default_pipeline_settings(pipeline): + import torch.nn as nn + from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector + from autoPyTorch.pipeline.nodes.image.simple_train_node import SimpleTrainNode + from autoPyTorch.pipeline.nodes.image.cross_validation_indices import CrossValidationIndices + from autoPyTorch.pipeline.nodes.image.loss_module_selector_indices import LossModuleSelectorIndices + from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo + from autoPyTorch.components.metrics.standard_metrics import accuracy + from autoPyTorch.components.preprocessing.loss_weight_strategies import LossWeightStrategyWeighted + + AutoNetImageData._apply_default_pipeline_settings(pipeline) + + net_selector = pipeline[NetworkSelectorDatasetInfo.get_name()] + net_selector.add_final_activation('softmax', nn.Softmax(1)) + + loss_selector = pipeline[LossModuleSelectorIndices.get_name()] + loss_selector.add_loss_module('cross_entropy', nn.CrossEntropyLoss, None, True) + loss_selector.add_loss_module('cross_entropy_weighted', nn.CrossEntropyLoss, LossWeightStrategyWeighted(), True) + + metric_selector = pipeline[MetricSelector.get_name()] + metric_selector.add_metric('accuracy', accuracy) + + train_node = pipeline[SimpleTrainNode.get_name()] + train_node.default_minimize_value = False + + cv = pipeline[CrossValidationIndices.get_name()] + cv.use_stratified_cv_split_default = True diff --git a/autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py 
b/autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py new file mode 100644 index 000000000..95f6a9859 --- /dev/null +++ b/autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py @@ -0,0 +1,56 @@ +from autoPyTorch.core.autonet_classes.autonet_image_classification import AutoNetImageClassification + + +class AutoNetImageClassificationMultipleDatasets(AutoNetImageClassification): + preset_folder_name = "image_classification_multiple_datasets" + + @classmethod + def get_default_pipeline(cls): + from autoPyTorch.pipeline.base.pipeline import Pipeline + from autoPyTorch.pipeline.nodes.image.optimization_algorithm_no_timelimit import OptimizationAlgorithmNoTimeLimit + from autoPyTorch.pipeline.nodes.optimizer_selector import OptimizerSelector + from autoPyTorch.pipeline.nodes.log_functions_selector import LogFunctionsSelector + from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector + + from autoPyTorch.pipeline.nodes.image.simple_scheduler_selector import SimpleLearningrateSchedulerSelector + from autoPyTorch.pipeline.nodes.image.cross_validation_indices import CrossValidationIndices + from autoPyTorch.pipeline.nodes.image.autonet_settings_no_shuffle import AutoNetSettingsNoShuffle + from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo + from autoPyTorch.pipeline.nodes.image.loss_module_selector_indices import LossModuleSelectorIndices + from autoPyTorch.pipeline.nodes.image.image_augmentation import ImageAugmentation + from autoPyTorch.pipeline.nodes.image.create_image_dataloader import CreateImageDataLoader + from autoPyTorch.pipeline.nodes.image.create_dataset_info import CreateDatasetInfo + from autoPyTorch.pipeline.nodes.image.simple_train_node import SimpleTrainNode + from autoPyTorch.pipeline.nodes.image.multiple_datasets import MultipleDatasets + from autoPyTorch.pipeline.nodes.image.image_dataset_reader import ImageDatasetReader + + # build the pipeline + pipeline = Pipeline([ + AutoNetSettingsNoShuffle(), + OptimizationAlgorithmNoTimeLimit([ + + MultipleDatasets([ + + ImageDatasetReader(), + CreateDatasetInfo(), + CrossValidationIndices([ + + NetworkSelectorDatasetInfo(), + OptimizerSelector(), + SimpleLearningrateSchedulerSelector(), + + LogFunctionsSelector(), + MetricSelector(), + + LossModuleSelectorIndices(), + + ImageAugmentation(), + CreateImageDataLoader(), + SimpleTrainNode() + ]) + ]) + ]) + ]) + + cls._apply_default_pipeline_settings(pipeline) + return pipeline diff --git a/autoPyTorch/core/autonet_classes/autonet_image_data.py b/autoPyTorch/core/autonet_classes/autonet_image_data.py new file mode 100644 index 000000000..1437182ae --- /dev/null +++ b/autoPyTorch/core/autonet_classes/autonet_image_data.py @@ -0,0 +1,133 @@ +import numpy as np +import torch +from autoPyTorch.core.api import AutoNet + + +__author__ = "Max Dippel, Michael Burkart and Matthias Urban" +__version__ = "0.0.1" +__license__ = "BSD" + + +class AutoNetImageData(AutoNet): + + @classmethod + def get_default_pipeline(cls): + from autoPyTorch.pipeline.base.pipeline import Pipeline + from autoPyTorch.pipeline.nodes.image.optimization_algorithm_no_timelimit import OptimizationAlgorithmNoTimeLimit + from autoPyTorch.pipeline.nodes.one_hot_encoding import OneHotEncoding + from autoPyTorch.pipeline.nodes.optimizer_selector import OptimizerSelector + from autoPyTorch.pipeline.nodes.log_functions_selector import LogFunctionsSelector + from autoPyTorch.pipeline.nodes.metric_selector import 
MetricSelector + + from autoPyTorch.pipeline.nodes.image.simple_scheduler_selector import SimpleLearningrateSchedulerSelector + from autoPyTorch.pipeline.nodes.image.cross_validation_indices import CrossValidationIndices + from autoPyTorch.pipeline.nodes.image.autonet_settings_no_shuffle import AutoNetSettingsNoShuffle + from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo + from autoPyTorch.pipeline.nodes.image.loss_module_selector_indices import LossModuleSelectorIndices + from autoPyTorch.pipeline.nodes.image.image_augmentation import ImageAugmentation + from autoPyTorch.pipeline.nodes.image.create_image_dataloader import CreateImageDataLoader + from autoPyTorch.pipeline.nodes.image.create_dataset_info import CreateDatasetInfo + from autoPyTorch.pipeline.nodes.image.simple_train_node import SimpleTrainNode + from autoPyTorch.pipeline.nodes.image.image_dataset_reader import ImageDatasetReader + from autoPyTorch.pipeline.nodes.image.single_dataset import SingleDataset + + # build the pipeline + pipeline = Pipeline([ + AutoNetSettingsNoShuffle(), + OptimizationAlgorithmNoTimeLimit([ + + SingleDataset([ + + ImageDatasetReader(), + CreateDatasetInfo(), + CrossValidationIndices([ + + NetworkSelectorDatasetInfo(), + OptimizerSelector(), + SimpleLearningrateSchedulerSelector(), + + LogFunctionsSelector(), + MetricSelector(), + + LossModuleSelectorIndices(), + + ImageAugmentation(), + CreateImageDataLoader(), + SimpleTrainNode() + ]) + ]) + ]) + ]) + + + cls._apply_default_pipeline_settings(pipeline) + return pipeline + + @staticmethod + def _apply_default_pipeline_settings(pipeline): + from autoPyTorch.pipeline.nodes.optimizer_selector import OptimizerSelector + from autoPyTorch.pipeline.nodes.image.simple_scheduler_selector import SimpleLearningrateSchedulerSelector + + from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo + from autoPyTorch.pipeline.nodes.image.simple_train_node import SimpleTrainNode + from autoPyTorch.pipeline.nodes.image.create_image_dataloader import CreateImageDataLoader + from autoPyTorch.pipeline.nodes.image.image_augmentation import ImageAugmentation + + from autoPyTorch.components.networks.image import DenseNet, ResNet, MobileNet + from autoPyTorch.components.networks.image.densenet_flexible import DenseNetFlexible + from autoPyTorch.components.networks.image.resnet152 import ResNet152 + from autoPyTorch.components.networks.image.darts.model import DARTSImageNet + + from autoPyTorch.components.optimizer.optimizer import AdamOptimizer, AdamWOptimizer, SgdOptimizer, RMSpropOptimizer + from autoPyTorch.components.lr_scheduler.lr_schedulers import SchedulerCosineAnnealingWithRestartsLR, SchedulerNone, \ + SchedulerCyclicLR, SchedulerExponentialLR, SchedulerReduceLROnPlateau, SchedulerReduceLROnPlateau, SchedulerStepLR, SchedulerAlternatingCosineLR, SchedulerAdaptiveLR, SchedulerExponentialLR + + from autoPyTorch.components.training.image.early_stopping import EarlyStopping + from autoPyTorch.components.training.image.mixup import Mixup + + net_selector = pipeline[NetworkSelectorDatasetInfo.get_name()] + net_selector.add_network('densenet', DenseNet) + net_selector.add_network('densenet_flexible', DenseNetFlexible) + net_selector.add_network('resnet', ResNet) + net_selector.add_network('resnet152', ResNet152) + net_selector.add_network('darts', DARTSImageNet) + net_selector.add_network('mobilenet', MobileNet) + net_selector._apply_search_space_update('resnet:nr_main_blocks', [2, 
4], log=False) + net_selector._apply_search_space_update('resnet:widen_factor_1', [0.5, 8], log=True) + + opt_selector = pipeline[OptimizerSelector.get_name()] + opt_selector.add_optimizer('adam', AdamOptimizer) + opt_selector.add_optimizer('adamw', AdamWOptimizer) + opt_selector.add_optimizer('sgd', SgdOptimizer) + opt_selector.add_optimizer('rmsprop', RMSpropOptimizer) + + lr_selector = pipeline[SimpleLearningrateSchedulerSelector.get_name()] + lr_selector.add_lr_scheduler('cosine_annealing', SchedulerCosineAnnealingWithRestartsLR) + lr_selector.add_lr_scheduler('cyclic', SchedulerCyclicLR) + lr_selector.add_lr_scheduler('step', SchedulerStepLR) + lr_selector.add_lr_scheduler('adapt', SchedulerAdaptiveLR) + lr_selector.add_lr_scheduler('plateau', SchedulerReduceLROnPlateau) + lr_selector.add_lr_scheduler('alternating_cosine',SchedulerAlternatingCosineLR) + lr_selector.add_lr_scheduler('exponential', SchedulerExponentialLR) + lr_selector.add_lr_scheduler('none', SchedulerNone) + + lr_selector._apply_search_space_update('step:step_size', [1, 100], log=True) + lr_selector._apply_search_space_update('step:gamma', [0.001, 0.99], log=True) + lr_selector._apply_search_space_update('cosine_annealing:T_max', [1, 100], log=True) + lr_selector._apply_search_space_update('cosine_annealing:T_mult', [1., 2.], log=False) + + train_node = pipeline[SimpleTrainNode.get_name()] + #train_node.add_training_technique("early_stopping", EarlyStopping) + train_node.add_batch_loss_computation_technique("mixup", Mixup) + + data_node = pipeline[CreateImageDataLoader.get_name()] + + data_node._apply_search_space_update('batch_size', [32, 160], log=True) + + augment_node = pipeline[ImageAugmentation.get_name()] + augment_node._apply_search_space_update('augment', [False, True]) + augment_node._apply_search_space_update('autoaugment', [False, True]) + augment_node._apply_search_space_update('fastautoaugment', [False, True]) + augment_node._apply_search_space_update('length', [2,6]) + augment_node._apply_search_space_update('cutout', [False, True]) + augment_node._apply_search_space_update('cutout_holes', [1, 50]) diff --git a/autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py b/autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py new file mode 100644 index 000000000..bc26e1629 --- /dev/null +++ b/autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py @@ -0,0 +1,20 @@ +try: + from hpbandster.optimizers.bohb_multi_kde import BOHB_Multi_KDE +except: + print("Could not find BOHB_Multi_KDE, replacing with object") + BOHB_Multi_KDE = object +from autoPyTorch.core.hpbandster_extensions.run_with_time import run_with_time + +class BOHBMultiKDEExt(BOHB_Multi_KDE): + def run_until(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, iteration_kwargs = {},): + """ + Parameters: + ----------- + runtime: int + time for this run in seconds + n_iterations: + the number of hyperband iterations to run + min_n_workers: int + minimum number of workers before starting the run + """ + return run_with_time(self, runtime, n_iterations, min_n_workers, iteration_kwargs) diff --git a/autoPyTorch/core/hpbandster_extensions/run_with_time.py b/autoPyTorch/core/hpbandster_extensions/run_with_time.py index 30bc48fc8..32bd4518a 100644 --- a/autoPyTorch/core/hpbandster_extensions/run_with_time.py +++ b/autoPyTorch/core/hpbandster_extensions/run_with_time.py @@ -18,7 +18,7 @@ def run_with_time(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, i """ self.wait_for_workers(min_n_workers) - + 
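# Illustrative sketch (not part of the original commit): what log=True means in the
# _apply_search_space_update calls above - the hyperparameter is drawn uniformly in log space, so a
# range such as the batch_size update [32, 160] is explored multiplicatively rather than linearly.
import numpy as np

rng = np.random.default_rng(0)
low, high = 32, 160
samples = np.exp(rng.uniform(np.log(low), np.log(high), size=5))
print(np.round(samples).astype(int))   # a handful of batch sizes spread on a log scale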
iteration_kwargs.update({'result_logger': self.result_logger}) if self.time_ref is None: @@ -30,15 +30,15 @@ def run_with_time(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, i self.thread_cond.acquire() start_time = time.time() - kill = False + while True: - if (not kill and runtime < time.time() - start_time): - # wait for running jobs and finish - kill = True - self.logger.info('HBMASTER: Timelimit reached: wait for remaining %i jobs'%self.num_running_jobs) - self._queue_wait() + + # Check if timelimit is reached + if (runtime < time.time() - start_time): + self.logger.info('HBMASTER: Timelimit reached: wait for remaining %i jobs'%self.num_running_jobs) + break next_run = None # find a new run to schedule @@ -47,16 +47,10 @@ def run_with_time(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, i if not next_run is None: break if next_run is not None: - if kill: - # register new run as finished - this will be interpreted as a crashed job - config_id, config, budget = next_run - job = Job(config_id, config=config, budget=budget, working_directory=self.working_directory) - self.iterations[job.id[0] - self.iterations[0].HPB_iter].register_result(job) - else: - self.logger.debug('HBMASTER: schedule new run for iteration %i'%i) - self._submit_job(*next_run) + self.logger.debug('HBMASTER: schedule new run for iteration %i'%i) + self._submit_job(*next_run) continue - elif not kill and n_iterations > 0: + elif n_iterations > 0: next_HPB_iter = len(self.iterations) + (self.iterations[0].HPB_iter if len(self.iterations) > 0 else 0) self.iterations.append(self.get_next_iteration(next_HPB_iter, iteration_kwargs)) n_iterations -= 1 @@ -69,6 +63,27 @@ def run_with_time(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, i else: break + # clean up / cancel remaining iteration runs + next_run = True + n_canceled = 0 + while next_run is not None: + next_run = None + for i in self.active_iterations(): + next_run = self.iterations[i].get_next_run() + if not next_run is None: + config_id, config, budget = next_run + job = Job(config_id, config=config, budget=budget, working_directory=self.working_directory) + self.iterations[job.id[0]].register_result(job) # register dummy job - will be interpreted as canceled job + n_canceled += 1 + break + + self.logger.info('HBMASTER: Canceled %i remaining runs'%n_canceled) + + # wait for remaining jobs + while self.num_running_jobs > 0: + self.thread_cond.wait(60) + self.logger.info('HBMASTER: Job finished: wait for remaining %i jobs'%self.num_running_jobs) + self.thread_cond.release() for i in self.warmstart_iteration: diff --git a/autoPyTorch/core/presets/image_classification/__init__.py b/autoPyTorch/core/presets/image_classification/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/core/presets/image_classification/full_cs.txt b/autoPyTorch/core/presets/image_classification/full_cs.txt new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/core/presets/image_classification/medium_cs.txt b/autoPyTorch/core/presets/image_classification/medium_cs.txt new file mode 100644 index 000000000..f5d2fda04 --- /dev/null +++ b/autoPyTorch/core/presets/image_classification/medium_cs.txt @@ -0,0 +1,4 @@ +lr_scheduler=[cosine_annealing, step] +networks=[resnet, mobilenet] +batch_loss_computation_techniques=[mixup] +optimizer=[adamw, sgd] diff --git a/autoPyTorch/core/presets/image_classification/tiny_cs.txt b/autoPyTorch/core/presets/image_classification/tiny_cs.txt new file mode 100644 index 
000000000..ebefdde80 --- /dev/null +++ b/autoPyTorch/core/presets/image_classification/tiny_cs.txt @@ -0,0 +1,4 @@ +lr_scheduler=[cosine_annealing] +networks=[resnet] +batch_loss_computation_techniques=[standard] +optimizer=[adamw] diff --git a/autoPyTorch/core/presets/image_classification_multiple_datasets/__init__.py b/autoPyTorch/core/presets/image_classification_multiple_datasets/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/core/presets/image_classification_multiple_datasets/full_cs.txt b/autoPyTorch/core/presets/image_classification_multiple_datasets/full_cs.txt new file mode 100644 index 000000000..e69de29bb diff --git a/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt b/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt new file mode 100644 index 000000000..f5d2fda04 --- /dev/null +++ b/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt @@ -0,0 +1,4 @@ +lr_scheduler=[cosine_annealing, step] +networks=[resnet, mobilenet] +batch_loss_computation_techniques=[mixup] +optimizer=[adamw, sgd] diff --git a/autoPyTorch/core/presets/image_classification_multiple_datasets/tiny_cs.txt b/autoPyTorch/core/presets/image_classification_multiple_datasets/tiny_cs.txt new file mode 100644 index 000000000..ebefdde80 --- /dev/null +++ b/autoPyTorch/core/presets/image_classification_multiple_datasets/tiny_cs.txt @@ -0,0 +1,4 @@ +lr_scheduler=[cosine_annealing] +networks=[resnet] +batch_loss_computation_techniques=[standard] +optimizer=[adamw] diff --git a/autoPyTorch/core/worker_no_timelimit.py b/autoPyTorch/core/worker_no_timelimit.py new file mode 100644 index 000000000..d99e3459e --- /dev/null +++ b/autoPyTorch/core/worker_no_timelimit.py @@ -0,0 +1,130 @@ +import logging +import torch +import time +import random +from hpbandster.core.worker import Worker + +from autoPyTorch.components.training.image.budget_types import BudgetTypeTime + +__author__ = "Max Dippel, Michael Burkart and Matthias Urban" +__version__ = "0.0.1" +__license__ = "BSD" + + +class ModuleWorkerNoTimeLimit(Worker): + def __init__(self, pipeline, pipeline_config, constant_hyperparameter, + X_train, Y_train, X_valid, Y_valid, budget_type, max_budget, working_directory, permutations=None, *args, **kwargs): + self.X_train = X_train #torch.from_numpy(X_train).float() + self.Y_train = Y_train #torch.from_numpy(Y_train).long() + self.X_valid = X_valid + self.Y_valid = Y_valid + + if permutations is None: + self.permutations = [idx for idx in range(len(X_train))] + else: + self.permutations = permutations + + self.max_budget = max_budget + self.budget_type = budget_type + + self.pipeline = pipeline + self.pipeline_config = pipeline_config + self.constant_hyperparameter = constant_hyperparameter + + self.working_directory = working_directory + + self.autonet_logger = logging.getLogger('autonet') + # self.end_time = None + + # We can only use user defined limits (memory) if we have the required module 'resource' - not available on windows! 
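+ # pynisher is likewise required: compute() uses it to wrap the training call in a wall-clock limit of 4x the budget.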
+ self.guarantee_limits = module_exists("resource") and module_exists("pynisher") + if (not self.guarantee_limits): + self.autonet_logger.info("Can not guarantee memory and time limit because module 'resource' is not available") + + super().__init__(*args, **kwargs) + + def compute(self, config, budget, working_directory, config_id, **kwargs): + + start_time = time.time() + + self.autonet_logger.debug("Starting optimization!") + + config.update(self.constant_hyperparameter) + + self.autonet_logger.debug("Budget " + str(budget) + " config: " + str(config)) + + if self.guarantee_limits and self.budget_type == 'time': + import pynisher + + limit_train = pynisher.enforce_limits(wall_time_in_s=int(budget * 4))(self.optimize_pipeline) + result, randomstate = limit_train(config, budget, config_id, random.getstate()) + + if (limit_train.exit_status == pynisher.MemorylimitException): + raise Exception("Memory limit reached. Took " + str((time.time()-start_time)) + " seconds with budget " + str(budget)) + elif (limit_train.exit_status != 0): + self.autonet_logger.info('Exception occurred using config:\n' + str(config)) + raise Exception("Exception in train pipeline. Took " + str((time.time()-start_time)) + " seconds with budget " + str(budget)) + + else: + result, randomstate = self.optimize_pipeline(config, budget, config_id, random.getstate()) + + random.setstate(randomstate) + + loss = result['loss'] + if 'losses' in result.keys(): + losses = result['losses'] + else: + losses = loss + info = result['info'] + + self.autonet_logger.debug("Result: " + str(loss) + " info: " + str(info)) + + # that is not really elegant but we can want to achieve some kind of feedback + network_name = [v for k, v in config.items() if k.endswith('network')] or "None" + + self.autonet_logger.info("Training " + str(network_name) + " with budget " + str(budget) + " resulted in score: " + str(loss) + " took " + str((time.time()-start_time)) + " seconds") + + if 'use_tensorboard_logger' in self.pipeline_config and self.pipeline_config['use_tensorboard_logger']: + import os + log_file = os.path.join(self.working_directory, "worker_logs_" + str(self.pipeline_config['task_id']), 'results.log') + sep = '\t' + with open(log_file, 'a+') as f: + f.write('Result: ' + str(round(loss, 2)) + sep + \ + 'Budget: ' + str(round(budget)) + '/' + str(round(self.pipeline_config['max_budget'])) + sep + \ + 'Used time: ' + str(round((time.time()-start_time))) + 'sec (' + str(round((time.time()-start_time)/budget, 2)) + 'x)' + sep + \ + 'ID: ' + str(config_id) + '\n') + + return { + 'loss': loss, + 'info': info, + 'losses': losses + } + + def optimize_pipeline(self, config, budget, config_id, random_state): + + random.setstate(random_state) + + if self.permutations is not None: + current_sh_run = config_id[0] + self.pipeline_config["dataset_order"] = self.permutations[current_sh_run%len(self.permutations)].tolist() + + try: + self.autonet_logger.info("Fit optimization pipeline") + return self.pipeline.fit_pipeline(hyperparameter_config=config, pipeline_config=self.pipeline_config, + X_train=self.X_train, Y_train=self.Y_train, X_valid=self.X_valid, Y_valid=self.Y_valid, + budget=budget, budget_type=self.budget_type, max_budget=self.max_budget, + config_id=config_id, working_directory=self.working_directory), random.getstate() + except Exception as e: + if 'use_tensorboard_logger' in self.pipeline_config and self.pipeline_config['use_tensorboard_logger']: + import tensorboard_logger as tl + tl.log_value('Exceptions/' + str(e), budget, 
int(time.time())) + #self.autonet_logger.exception('Exception occurred') + raise e + +def module_exists(module_name): + try: + __import__(module_name) + except ImportError: + return False + else: + return True diff --git a/autoPyTorch/data_management/data_loader.py b/autoPyTorch/data_management/data_loader.py new file mode 100644 index 000000000..f7cac7380 --- /dev/null +++ b/autoPyTorch/data_management/data_loader.py @@ -0,0 +1,47 @@ +import os +import math +from PIL import Image +import requests +from io import BytesIO +from torchvision import transforms, utils + + + +class DataLoader(): + def __init__(self): + pass + + def load(self, url, size): + try: + response = requests.get(url) + img = Image.open(BytesIO(response.content)).convert('RGB') + except: + return None + t = transforms.Compose([transforms.Resize(size), + transforms.CenterCrop(size), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + # transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ]) + res = (t(img) * 255).int() + return res.reshape((size*size*3)) + + def save_imagenet_subset(self, root, name, class_wnids, image_size, max_images=None): + with open(os.path.join(root, name) + '.data', 'w+') as data: + with open(os.path.join(root, name) + '.label', 'w+') as label: + for i, wnid in enumerate(class_wnids): + urls = requests.get('http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=' + wnid).content + urls = urls.split(b"\n") + images = 0 + for u in range(len(urls)): + if max_images is not None and images+1 > max_images / len(class_wnids): + break + img = self.load(urls[u].decode('utf-8'), image_size) + if img is None: + continue + images += 1 + data.write(' '.join([str(rgb) for rgb in img.numpy()]) + '\n') + label.write(str(i) + '\n') + missing = math.floor(max_images/len(class_wnids)) - images + if missing > 0: + print('Wnid', wnid, 'needs', missing, 'more images.') \ No newline at end of file diff --git a/autoPyTorch/data_management/data_manager.py b/autoPyTorch/data_management/data_manager.py index 0507f25d9..b0e0d7777 100644 --- a/autoPyTorch/data_management/data_manager.py +++ b/autoPyTorch/data_management/data_manager.py @@ -17,8 +17,10 @@ from enum import Enum class ProblemType(Enum): FeatureClassification = 1 + ImageClassification = 2 FeatureRegression = 3 FeatureMultilabel = 4 + ImageClassificationMultipleDatasets = 5 class DataManager(object): """ Load data from multiple sources and formants""" @@ -156,6 +158,102 @@ def _split_data(self, test_split, seed): self.X_train = self.X self.Y_train = self.Y + +class ImageManager(DataManager): + + def read_data(self, file_name, test_split=0.0, is_classification=None, **kwargs): + self.is_classification = True + self.is_multilabel = False + + if isinstance(file_name, list): + import numpy as np + arr = np.array(file_name) + self.X_train = arr + self.Y_train = np.array([0] * len(file_name)) + self.X_valid = self.Y_valid = self.X_test = self.Y_test = None + self.problem_type = ProblemType.ImageClassificationMultipleDatasets + elif file_name.endswith(".csv"): + import pandas as pd + import math + import numpy as np + self.data = np.array(pd.read_csv(file_name, header=None)) + + self.X_train = np.array(self.data[:,0]) + self.Y_train = np.array(self.data[:,1]) + + self.X_valid = self.Y_valid = self.X_test = self.Y_test = None + + if test_split > 0: + samples = self.X_train.shape[0] + indices = list(range(samples)) + np.random.shuffle(indices) + split = samples * test_split + test_indices, train_indices = 
indices[:math.ceil(split)], indices[math.floor(split):] + self.X_test, self.Y_test = self.X_train[test_indices], self.Y_train[test_indices] + self.X_train, self.Y_train = self.X_train[train_indices], self.Y_train[train_indices] + + self.problem_type = ProblemType.ImageClassification + + def generate_classification(self, problem="MNIST", test_split=0.1, force_download=False, train_size=-1, test_size=-1): + self.is_classification = True + data = None + conversion = False + if problem == "MNIST": + data = torchvision.datasets.MNIST + elif problem == "Fashion-MNIST": + data = torchvision.datasets.FashionMNIST + elif problem == "CIFAR": + conversion = True + data = torchvision.datasets.CIFAR10 + else: + raise ValueError("Dataset not supported: " + problem) + + + train_dataset = data(root='datasets/torchvision/' + problem + '/', + train=True, + transform=transforms.ToTensor(), + download=True) + + test_dataset = data(root='datasets/torchvision/' + problem + '/', + train=False, + transform=transforms.ToTensor()) + images_train = [] + labels_train = [] + + train_size = train_dataset.__len__() if train_size == -1 else min(train_size, train_dataset.__len__()) + test_size = test_dataset.__len__() if test_size == -1 else min(test_size, test_dataset.__len__()) + + for i in range(train_size): + sys.stdout.write("Reading " + problem + " train data ["+ str(train_size)+"] - progress: %d%% \r" % (int(100 * (i + 1)/ train_size) )) + sys.stdout.flush() + image, label = train_dataset.__getitem__(i) + if conversion: + label = torch.tensor(label) + images_train.append(image.numpy()) + labels_train.append(label.numpy()) + + self.X_train = np.array(images_train) + self.Y_train = np.array(labels_train) + + images_test = [] + labels_test = [] + print() + for i in range(test_size): + sys.stdout.write("Reading " + problem + " test data ["+ str(test_size)+"] - progress: %d%% \r" % (int(100 * (i + 1) / test_size) )) + sys.stdout.flush() + image, label = test_dataset.__getitem__(i) + if conversion: + label = torch.tensor(label) + images_test.append(image.numpy()) + labels_test.append(label.numpy()) + + self.problem_type = ProblemType.ImageClassification + self.X_test = np.array(images_test) + self.Y_test = np.array(labels_test) + + self.categorical_features = None + print() + def deterministic_shuffle_and_split(X, Y, split, seed): """Split the data deterministically given the seed diff --git a/autoPyTorch/data_management/data_reader.py b/autoPyTorch/data_management/data_reader.py index 5d7d7d85d..36ed25352 100644 --- a/autoPyTorch/data_management/data_reader.py +++ b/autoPyTorch/data_management/data_reader.py @@ -231,4 +231,37 @@ def read_binary_sparse_datafile(self, filepath, shape): col_indizes.append(int(value) - 1) row_indizes.append(row) print("Done") - return csr_matrix(([1] * len(row_indizes), (row_indizes, col_indizes)), shape=shape) \ No newline at end of file + return csr_matrix(([1] * len(row_indizes), (row_indizes, col_indizes)), shape=shape) + + +class OpenMLImageReader(OpenMlReader): + def __init__(self, dataset_id, is_classification = None, api_key=None, nChannels=1): + self.channels = nChannels + super(OpenMLImageReader, self).__init__(dataset_id, is_classification, api_key) + + def read(self, auto_convert=True, **kwargs): + """ + Read the data from given openml datset file. + + Arguments: + auto_convert: Automatically convert data after reading. + *args, **kwargs: arguments for converting. 
+ """ + + dataset = self.openml.datasets.get_dataset(self.dataset_id) + self.data = dataset.get_data() + + + self.num_entries = len(self.data) + self.num_features = len(self.data[0]) - 1 + + + self.X = self.data[0:self.num_entries, 0:self.num_features] / 255 + + image_size = int(math.sqrt(self.num_features / self.channels)) + self.X = np.reshape(self.X, (self.X.shape[0], self.channels, image_size, image_size)) + + self.Y = self.data[0:self.num_entries, -1] + self.num_classes = len(np.unique(self.Y)) + if self.is_classification is None: + self.is_classification = dataset.get_features_by_type("nominal")[-1] == self.num_features diff --git a/autoPyTorch/data_management/image_loader.py b/autoPyTorch/data_management/image_loader.py new file mode 100644 index 000000000..00dad12c1 --- /dev/null +++ b/autoPyTorch/data_management/image_loader.py @@ -0,0 +1,119 @@ +import torch.utils.data as data + +import os +import os.path + +import logging +logging.getLogger('PIL').setLevel(logging.CRITICAL) +from PIL import Image + +def default_loader(path): + return Image.open(path).convert('RGB') + +from multiprocessing import Process, RawValue, Lock +import time + +class ThreadCounter(object): + def __init__(self): + # RawValue because we don't need it to create a Lock: + self.val = RawValue('d', 0) + self.num = RawValue('i', 0) + self.lock = Lock() + + def add(self, value): + with self.lock: + self.val.value += value + self.num.value += 1 + + def value(self): + with self.lock: + return self.val.value + + def avg(self): + with self.lock: + return self.val.value / self.num.value + + def reset(self): + with self.lock: + self.val.value = 0 + self.num.value = 0 + +class ImageFilelist(data.Dataset): + def __init__(self, image_file_list, label_list, transform=None, target_transform=None, loader=default_loader, cache_size=0, image_size=None): + self.image_file_list = image_file_list + self.label_list = label_list + self.transform = transform + self.target_transform = target_transform + self.loader = loader + # self.readTime = ThreadCounter() + # self.augmentTime = ThreadCounter() + # self.loadTime = ThreadCounter() + self.fill_cache(cache_size, image_size) + + def get_times(self, prefix): + times = dict() + # times.update({prefix + k: v for k, v in self.transform.get_times().items()}) + # times[prefix + 'read_time'] = self.readTime.value() + # times[prefix + 'read_time_avg'] = self.readTime.avg() + # times[prefix + 'augment_time'] = self.augmentTime.value() + # times[prefix + 'augment_time_avg'] = self.augmentTime.avg() + # times[prefix + 'load_time'] = self.loadTime.value() + return times + + def fill_cache(self, cache_size, image_size_pixels): + self.cache = dict() + if cache_size == 0: + return + import sys + max_image_size = 0 + cur_size = 0 + for i, impath in enumerate(self.image_file_list): + img = self.loader(impath) + image_size = sys.getsizeof(img) + max_image_size = max(max_image_size, image_size) + cur_size += image_size + if image_size_pixels is not None: + img = img.resize(image_size_pixels) + self.cache[impath] = img + # logging.getLogger('autonet').info('Load image: ' + str(sys.getsizeof(self.cache[impath])) + ' bytes - Cache: ' + str(cur_size)) + if cur_size + max_image_size > cache_size: + break + logging.getLogger('autonet').info('Could load ' + str(i+1) + '/' + str(len(self.image_file_list)) + ' images into cache, used ' + str(cur_size) + '/' + str(cache_size) + ' bytes') + + def __getitem__(self, index): + impath = self.image_file_list[index] + target = self.label_list[index] + # start_time = 
time.time() + img = self.cache[impath] if impath in self.cache else self.loader(impath) + # self.readTime.add(time.time() - start_time) + # start_time = time.time() + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + # self.augmentTime.add(time.time() - start_time) + # self.loadTime.add(time.time() - start_time) + return img, target + + def __len__(self): + return len(self.image_file_list) + +class XYDataset(data.Dataset): + def __init__(self, X, Y, transform=None, target_transform=None): + self.X = X + self.Y = Y + self.transform = transform + self.target_transform = target_transform + + def __getitem__(self, index): + img = self.X[index] + target = self.Y[index] + + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + return img, target + + def __len__(self): + return len(self.image_file_list) \ No newline at end of file diff --git a/autoPyTorch/pipeline/base/node.py b/autoPyTorch/pipeline/base/node.py index e58b82323..45d43448e 100644 --- a/autoPyTorch/pipeline/base/node.py +++ b/autoPyTorch/pipeline/base/node.py @@ -5,6 +5,7 @@ import gc import inspect +import logging class Node(): @@ -12,6 +13,7 @@ def __init__(self): self.child_node = None self.fit_output = None self.predict_output = None + self.logger = logging.getLogger('autonet') def fit(self, **kwargs): """Fit pipeline node. @@ -105,7 +107,10 @@ def fit_traverse(self, **kwargs): else: # Neither default specified nor keyword available print ("Available keywords:", sorted(available_kwargs.keys())) raise ValueError('Node ' + str(type(node)) + ' requires keyword ' + str(keyword) + ' which is not available.') - + + if type(node) != Node: + self.logger.debug('Fit: ' + str(type(node).__name__)) + # call fit method node.fit_output = node.fit(**required_kwargs) if (not isinstance(node.fit_output, dict)): diff --git a/autoPyTorch/pipeline/base/pipeline.py b/autoPyTorch/pipeline/base/pipeline.py index 378875f61..af246f8a5 100644 --- a/autoPyTorch/pipeline/base/pipeline.py +++ b/autoPyTorch/pipeline/base/pipeline.py @@ -107,7 +107,8 @@ def get_hyperparameter_search_space(self, dataset_info=None, **pipeline_config): # add the config space of each node for name, node in self._pipeline_nodes.items(): - config_space = node.get_hyperparameter_search_space(dataset_info=dataset_info, **pipeline_config) + #print("dataset_info" in pipeline_config.keys()) + config_space = node.get_hyperparameter_search_space(**pipeline_config) cs.add_configuration_space(prefix=name, configuration_space=config_space, delimiter=ConfigWrapper.delimiter) # add the dependencies between the nodes diff --git a/autoPyTorch/pipeline/nodes/create_dataloader.py b/autoPyTorch/pipeline/nodes/create_dataloader.py index 1d51b529d..187332c9f 100644 --- a/autoPyTorch/pipeline/nodes/create_dataloader.py +++ b/autoPyTorch/pipeline/nodes/create_dataloader.py @@ -49,7 +49,7 @@ def fit(self, pipeline_config, hyperparameter_config, X, Y, train_indices, valid def predict(self, pipeline_config, X, batch_size): X = torch.from_numpy(to_dense(X)).float() - y_placeholder = torch.Tensor(X.size()[0]) + y_placeholder = torch.zeros(X.size()[0]) predict_loader = DataLoader(TensorDataset(X.float(), y_placeholder), batch_size) diff --git a/autoPyTorch/pipeline/nodes/image/__init__.py b/autoPyTorch/pipeline/nodes/image/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git 
a/autoPyTorch/pipeline/nodes/image/autonet_settings_no_shuffle.py b/autoPyTorch/pipeline/nodes/image/autonet_settings_no_shuffle.py new file mode 100644 index 000000000..476fec091 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/autonet_settings_no_shuffle.py @@ -0,0 +1,71 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import logging +import numpy as np +import sys, os +import pprint + +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode +from autoPyTorch.utils.hyperparameter_search_space_update import parse_hyperparameter_search_space_updates + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool + +import random, torch + +class AutoNetSettingsNoShuffle(PipelineNode): + def __init__(self): + super(AutoNetSettingsNoShuffle, self).__init__() + + self.logger_settings = dict() + self.logger_settings['debug'] = logging.DEBUG + self.logger_settings['info'] = logging.INFO + self.logger_settings['warning'] = logging.WARNING + self.logger_settings['error'] = logging.ERROR + self.logger_settings['critical'] = logging.CRITICAL + + + def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid): + + autonet_logger = logging.getLogger('autonet') + hpbandster_logger = logging.getLogger('hpbandster') + + level = self.logger_settings[pipeline_config['log_level']] + autonet_logger.setLevel(level) + hpbandster_logger.setLevel(level) + + random.seed(pipeline_config['random_seed']) + torch.manual_seed(pipeline_config['random_seed']) + np.random.seed(pipeline_config['random_seed']) + + if 'result_logger_dir' in pipeline_config: + directory = os.path.join(pipeline_config['result_logger_dir'], "worker_logs_" + str(pipeline_config['task_id'])) + os.makedirs(directory, exist_ok=True) + + if level == logging.DEBUG: + self.addHandler([autonet_logger, hpbandster_logger], level, os.path.join(directory, 'autonet_debug.log')) + self.addHandler([autonet_logger, hpbandster_logger], logging.INFO, os.path.join(directory, 'autonet_info.log')) + else: + self.addHandler([autonet_logger, hpbandster_logger], level, os.path.join(directory, 'autonet.log')) + + autonet_logger.info("Start autonet with config:\n" + str(pprint.pformat(pipeline_config))) + + return { 'X_train': X_train, 'Y_train': Y_train, 'X_valid': X_valid, 'Y_valid': Y_valid } + + def get_pipeline_config_options(self): + options = [ + ConfigOption(name='log_level', default='warning', type=str, choices=list(self.logger_settings.keys())), + ConfigOption(name='random_seed', default=lambda c: abs(hash(c["run_id"])) % (2 ** 32), type=int, depends=True, info="Make sure to specify the same seed for all workers."), + ConfigOption(name='hyperparameter_search_space_updates', default=None, type=["directory", parse_hyperparameter_search_space_updates], + info="object of type HyperparameterSearchSpaceUpdates"), + ] + return options + + def addHandler(self, loggers, level, path): + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + fh = logging.FileHandler(path) + fh.setLevel(level) + fh.setFormatter(formatter) + for logger in loggers: + logger.addHandler(fh) diff --git a/autoPyTorch/pipeline/nodes/image/create_dataset_info.py b/autoPyTorch/pipeline/nodes/image/create_dataset_info.py new file mode 100644 index 000000000..4feee5fb4 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/create_dataset_info.py @@ -0,0 +1,154 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import os +import numpy as np +import scipy.sparse + +from 
torchvision import datasets + +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.utils.config.config_file_parser import ConfigFileParser + + +class DataSetInfo(): + def __init__(self): + self.categorical_features = [] + self.x_shape = [] + self.y_shape = [] + self.is_sparse = False + self.default_dataset = None # could be set to CIFAR to download official CIFAR dataset from pytorch + +class CreateDatasetInfo(PipelineNode): + + default_datasets = { + # NAME # dataset # shape # classes + 'CIFAR10' : (datasets.CIFAR10, [50000, 3, 32, 32], 10), + 'CIFAR100' : (datasets.CIFAR100, [50000, 3, 32, 32], 10), + 'SVHN' : (datasets.SVHN, [70000, 3, 32, 32], 10), + 'MNIST' : (datasets.MNIST, [60000, 28, 28], 10), + } + + + def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, dataset_path): + info = DataSetInfo() + info.is_sparse = scipy.sparse.issparse(X_train) + info.path = dataset_path + + if X_train[0] in self.default_datasets: + dataset_type, shape, classes = self.default_datasets[X_train[0]] + info.default_dataset = dataset_type + info.x_shape = shape + info.y_shape = [shape[0], classes] + X_train = np.array([X_train[0]]) + Y_train = np.array([]) + + elif len(X_train.shape) == 1: + if 'max_class_size' not in pipeline_config.keys(): + pipeline_config['max_class_size'] = None # backwards compatibility + + if "file_extensions" not in pipeline_config.keys(): + pipeline_config['file_extensions'] = ['.png', '.jpg', '.JPEG', '.pgm'] + + X_train, Y_train = self.add_subpaths(X_train, Y_train, + pipeline_config['images_root_folders'], pipeline_config['file_extensions'], pipeline_config['max_class_size'] or float('inf')) + X_valid, Y_valid = self.add_subpaths(X_valid, Y_valid, + pipeline_config['images_root_folders'], pipeline_config['file_extensions'], pipeline_config['max_class_size'] or float('inf')) + + info.x_shape = [X_train.shape[0]] + pipeline_config['images_shape'] + info.y_shape = Y_train.shape + + if len(info.y_shape) == 1 or info.y_shape[1] == 1: + info.y_shape = (info.y_shape[0], len(np.unique(Y_train))) + else: + info.x_shape = X_train.shape + info.y_shape = Y_train.shape + + return {'X_train' : X_train, 'Y_train' : Y_train, 'X_valid' : X_valid, 'Y_valid' : Y_valid, 'dataset_info' : info} + + + def predict(self, pipeline_config, X): + fit_res = self.fit(pipeline_config, X, np.zeros(X.shape[0]), None, None, pipeline_config) + return { 'X': fit_res['X_train'], 'dataset_info': fit_res['dataset_info'] } + + def get_pipeline_config_options(self): + options = [ + ConfigOption(name="file_extensions", default=['.png', '.jpg', '.JPEG', '.pgm'], type=str, list=True), + ConfigOption(name="images_shape", default=[3, 32, 32], type=int, list=True), + ConfigOption(name="images_root_folders", default=[ConfigFileParser.get_autonet_home()], type='directory', list=True), + ConfigOption(name="max_class_size", default=None, type=int), + ] + return options + + def add_subpaths(self, X, Y, root_folders, extensions, max_class_size): + if X is None or Y is None: + return None, None + + new_X, new_Y = [], [] + #for i, path in enumerate(X): + # for root in root_folders: + # tmp = os.path.join(root, path) + # if os.path.exists(tmp): + # path = tmp + # break + # if "."+path.split(".")[1] in extensions: + # new_X.append(X) + # new_Y = Y + # continue + # if not os.path.exists(path): + # print(path) + # raise Exception('Invalid path: ' + str(root_folders) + str(path)) + # if os.path.isfile(path) and 
os.path.splitext(path)[1] == '.h5': + # import h5py + # return h5py.File(path, 'r')['x'].value, h5py.File(os.path.join(root, Y[i]), 'r')['y'].value.squeeze() + # self.add_path(path, Y[i], new_X, new_Y, extensions, max_class_size) + + for i, path in enumerate(X): + for root in root_folders: + tmp = os.path.join(root, path) + if os.path.exists(tmp): + path = tmp + break + if not os.path.exists(path): + raise Exception('Invalid path: ' + str(root_folders) + str(path)) + if os.path.isfile(path) and os.path.splitext(path)[1] == '.h5': + import h5py + return h5py.File(path, 'r')['x'].value, h5py.File(os.path.join(root, Y[i]), 'r')['y'].value.squeeze() + self.add_path(path, Y[i], new_X, new_Y, extensions, max_class_size) + + if len(new_X) == 0: + raise Exception('Could not find any images in ' + str(root_folders) + '...' + str(extensions)) + return np.array(new_X), np.array(new_Y) + + def add_path(self, cur_X, cur_Y, new_X, new_Y, extensions, max_class_size): + is_file, max_class_size = self.add_file(cur_X, cur_Y, new_X, new_Y, extensions, max_class_size) + if is_file: + return + + for sub in os.listdir(cur_X): + if max_class_size <= 0: + return max_class_size + path = os.path.join(cur_X, sub) + is_file, max_class_size = self.add_file(path, cur_Y, new_X, new_Y, extensions, max_class_size) + + if not is_file: + max_class_size = self.add_path(path, cur_Y, new_X, new_Y, extensions, max_class_size) + + def add_file(self, cur_X, cur_Y, new_X, new_Y, extensions, max_class_size): + if not os.path.isfile(cur_X): + return False, max_class_size + if not os.path.splitext(cur_X)[1] in extensions: + return True, max_class_size + if os.path.getsize(cur_X) > 0: + new_X.append(cur_X) + new_Y.append(cur_Y) + max_class_size -= 1 + return True, max_class_size - 1 + else: + import logging + logging.getLogger('autonet').debug('Image is invalid! 
- size == 0:' + str(cur_X)) + return True, max_class_size + diff --git a/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py b/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py new file mode 100644 index 000000000..987be3418 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py @@ -0,0 +1,97 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import inspect +import logging +import numpy as np + +from autoPyTorch.pipeline.nodes.create_dataloader import CreateDataLoader +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper +from autoPyTorch.utils.config.config_file_parser import ConfigFileParser +from autoPyTorch.utils.config.config_option import ConfigOption + +import torch +import scipy.sparse +from torch.utils.data import DataLoader, TensorDataset, Dataset +from autoPyTorch.data_management.image_loader import ImageFilelist, XYDataset +from torch.utils.data.sampler import SubsetRandomSampler +from torchvision import datasets, models, transforms + +class CreateImageDataLoader(CreateDataLoader): + + def fit(self, pipeline_config, hyperparameter_config, X, Y, train_indices, valid_indices, train_transform, valid_transform, dataset_info): + + # if len(X.shape) > 1: + # return super(CreateImageDataLoader, self).fit(pipeline_config, hyperparameter_config, X, Y, train_indices, valid_indices) + + torch.manual_seed(pipeline_config["random_seed"]) + hyperparameter_config = ConfigWrapper(self.get_name(), hyperparameter_config) + + if dataset_info.default_dataset: + train_dataset = dataset_info.default_dataset(root=pipeline_config['default_dataset_download_dir'], train=True, download=True, transform=train_transform) + if valid_indices is not None: + valid_dataset = dataset_info.default_dataset(root=pipeline_config['default_dataset_download_dir'], train=True, download=True, transform=valid_transform) + elif len(X.shape) > 1: + train_dataset = XYDataset(X, Y, transform=train_transform, target_transform=lambda y: y.astype(np.int64)) + valid_dataset = XYDataset(X, Y, transform=valid_transform, target_transform=lambda y: y.astype(np.int64)) + else: + train_dataset = ImageFilelist(X, Y, transform=train_transform, target_transform=lambda y: y.astype(np.int64), cache_size=pipeline_config['dataloader_cache_size_mb'] * 1000, image_size=dataset_info.x_shape[2:]) + if valid_indices is not None: + valid_dataset = ImageFilelist(X, Y, transform=valid_transform, target_transform=lambda y: y.astype(np.int64), cache_size=0, image_size=dataset_info.x_shape[2:]) + valid_dataset.cache = train_dataset.cache + + train_loader = DataLoader( + dataset=train_dataset, + batch_size=int(hyperparameter_config['batch_size']), + sampler=SubsetRandomSampler(train_indices), + drop_last=True, + pin_memory=True, + num_workers=pipeline_config['dataloader_worker']) + + valid_loader = None + if valid_indices is not None: + valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=int(hyperparameter_config['batch_size']), + sampler=SubsetRandomSampler(valid_indices), + drop_last=False, + pin_memory=True, + num_workers=pipeline_config['dataloader_worker']) + + return {'train_loader': train_loader, 'valid_loader': valid_loader, 'batch_size': hyperparameter_config['batch_size']} + + def get_pipeline_config_options(self): + options = [ + ConfigOption("default_dataset_download_dir", default=ConfigFileParser.get_autonet_home(), type='directory'), + ConfigOption("dataloader_worker", default=1, type=int), + ConfigOption("dataloader_cache_size_mb", default=0, 
type=int) + ] + return options + + def predict(self, pipeline_config, X, batch_size, predict_transform, dataset_info): + + if len(X.shape) > 1: + return super(CreateImageDataLoader, self).predict(pipeline_config, X, batch_size) + + + if dataset_info.default_dataset: + predict_dataset = dataset_info.default_dataset(root=pipeline_config['default_dataset_download_dir'], train=False, download=True, transform=predict_transform) + else: + try: + y_placeholder = torch.zeros(X.size()[0]) + except: + y_placeholder = torch.zeros(len(X)) + predict_dataset = ImageFilelist(X, y_placeholder, transform=predict_transform) + + predict_loader = DataLoader( + dataset=predict_dataset, + batch_size=int(batch_size), + shuffle=False, + pin_memory=True, + num_workers=pipeline_config['dataloader_worker']) + + return {'predict_loader': predict_loader} + + + diff --git a/autoPyTorch/pipeline/nodes/image/cross_validation_indices.py b/autoPyTorch/pipeline/nodes/image/cross_validation_indices.py new file mode 100644 index 000000000..d96493bf3 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/cross_validation_indices.py @@ -0,0 +1,223 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import torch +import logging +import scipy.sparse +import numpy as np +import pandas as pd +import signal +import time +import math +import copy + +from sklearn.model_selection import StratifiedKFold +from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode +from autoPyTorch.pipeline.base.pipeline import Pipeline + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.components.training.image.budget_types import BudgetTypeTime +from sklearn.model_selection import StratifiedShuffleSplit + +import time + +class CrossValidationIndices(SubPipelineNode): + def __init__(self, train_pipeline_nodes): + """CrossValidation pipeline node. + It will run the train_pipeline by providing different train and validation datasets given the cv_split value defined in the config. 
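+ If an explicit validation set (X_valid/Y_valid) is passed, it takes precedence: cv_splits and validation_split are ignored and a warning is logged.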
+ Cross validation can be disabled by setting cv_splits to <= 1 in the config + This enables the validation_split config parameter which, if no validation data is provided, will split the train dataset according its value (percent of train dataset) + + Train: + The train_pipeline will receive the following inputs: + {hyperparameter_config, pipeline_config, X, Y, train_sampler, valid_sampler, budget, training_techniques, fit_start_time, categorical_features} + + Prediction: + The train_pipeline will receive the following inputs: + {pipeline_config, X} + + Arguments: + train_pipeline {Pipeline} -- training pipeline that will be computed cv_split times + train_result_node {PipelineNode} -- pipeline node that provides the results of the train_pipeline + """ + + super(CrossValidationIndices, self).__init__(train_pipeline_nodes) + + self.use_stratified_cv_split_default = False + self.logger = logging.getLogger('autonet') + + + def fit(self, hyperparameter_config, pipeline_config, X_train, Y_train, X_valid, Y_valid, budget, budget_type, dataset_info, config_id, working_directory): + + cv_splits = max(1, pipeline_config['cv_splits']) + val_split = max(0, min(1, pipeline_config['validation_split'])) + + budget_too_low_for_cv, cv_splits, loss_penalty = self.incorporate_num_cv_splits_in_budget(budget, pipeline_config, cv_splits) + + loss = 0 + infos = [] + + np.random.seed(pipeline_config['random_seed']) + + split_indices = [] + X = X_train + Y = Y_train + + if X_valid is not None and Y_valid is not None: + if cv_splits > 1: + self.logger.warning('CV splits are set to ' + str(cv_splits) + ' and validation set is specified, autonet will ignore cv splits and evaluate on given validation set') + if val_split > 0.0: + self.logger.warning('Validation split is set to ' + str(val_split) + ' and validation set is specified, autonet will ignore split and evaluate on given validation set') + + train_indices = self.shuffle_indices(list(range(X_train.shape[0])), pipeline_config['shuffle']) + valid_indices = self.shuffle_indices(list(range(X_train.shape[0], X_train.shape[0] + X_valid.shape[0])), pipeline_config['shuffle']) + + X = self.concat(X_train, X_valid) + Y = self.concat(Y_train, Y_valid) + + split_indices.append([train_indices, valid_indices]) + + elif cv_splits > 1: + if val_split > 0.0: + self.logger.warning('Validation split is set to ' + str(val_split) + ' and cv splits are specified, autonet will ignore validation split and evaluate on ' + str(cv_splits) + ' cv splits') + + if pipeline_config['use_stratified_cv_split'] and Y.shape[0] == dataset_info.x_shape[0]: + assert len(Y.shape) == 1 or Y.shape[1] == 1, "Y is in wrong shape for stratified CV split" + skf = StratifiedKFold(n_splits=cv_splits, shuffle=pipeline_config['shuffle']) + split_indices = list(skf.split(np.zeros(dataset_info.x_shape[0]), Y.reshape((-1, )))) + else: + indices = self.shuffle_indices(list(range(dataset_info.x_shape[0])), pipeline_config['shuffle']) + split_size = len(indices) / cv_splits + for split in range(cv_splits): + i1 = int(split*split_size) + i2 = int((split+1)*split_size) + train_indices, valid_indices = indices[:i1] + indices[i2:], indices[i1:i2] + split_indices.append([train_indices, valid_indices]) + + elif val_split > 0.0: + if pipeline_config['use_stratified_cv_split'] and Y.shape[0] == dataset_info.x_shape[0] and (len(Y.shape) == 1 or Y.shape[1] == 1): + sss = StratifiedShuffleSplit(n_splits=1, test_size=val_split, random_state=pipeline_config['random_seed']) + train, valid = 
list(sss.split(np.zeros(dataset_info.x_shape[0]), Y.reshape((-1, ))))[0] + split_indices.append([train.tolist(), valid.tolist()]) + + # samples = dataset_info.x_shape[0] + # skf = StratifiedKFold(n_splits=math.ceil(samples / (samples * val_split)), shuffle=pipeline_config['shuffle']) + # split_indices = [list(skf.split(np.zeros(dataset_info.x_shape[0]), Y.reshape((-1, ))))[0]] + else: + indices = self.shuffle_indices(list(range(dataset_info.x_shape[0])), pipeline_config['shuffle']) + split = int(len(indices) * (1-val_split)) + + train_indices, valid_indices = indices[:split], indices[split:] + split_indices.append([train_indices, valid_indices]) + else: + train_indices = self.shuffle_indices(list(range(dataset_info.x_shape[0])), pipeline_config['shuffle']) + split_indices.append([train_indices, []]) + + + + + if 'categorical_features' in pipeline_config and pipeline_config['categorical_features']: + categorical_features = pipeline_config['categorical_features'] + else: + categorical_features = [False] * dataset_info.x_shape[1] + + for i, split in enumerate(split_indices): + + self.logger.debug("CV split " + str(i)) + + train_indices = split[0] + valid_indices = split[1] if len(split[1]) > 0 else None + + if budget_too_low_for_cv: + cv_splits = 1 + + cur_budget = budget/cv_splits + + result = self.sub_pipeline.fit_pipeline( + hyperparameter_config=hyperparameter_config, pipeline_config=pipeline_config, + X=X, Y=Y, dataset_info=dataset_info, + train_indices=train_indices, valid_indices=valid_indices, + budget=cur_budget, budget_type=budget_type, + categorical_features=categorical_features, + config_id=config_id, + working_directory=working_directory) + + if result is not None: + loss += result['loss'] + infos.append(result['info']) + + if budget_too_low_for_cv: + break + + if (len(infos) == 0): + raise Exception("Could not finish a single cv split due to memory or time limitation") + + if len(infos) == 1: + info = infos[0] + else: + df = pd.DataFrame(infos) + info = dict(df.mean()) + + loss = loss / cv_splits + loss_penalty + + return {'loss': loss, 'info': info} + + def predict(self, pipeline_config, X, dataset_info): + return self.sub_pipeline.predict_pipeline(pipeline_config=pipeline_config, X=X, dataset_info=dataset_info) + + def get_pipeline_config_options(self): + options = [ + # percent/100 of train dataset used for validation if no validation and cv_splits == 1 + ConfigOption("validation_split", default=0.0, type=float, choices=[0, 1]), + # number of cross validation splits 1 -> no cv + ConfigOption("cv_splits", default=1, type=int), + ConfigOption("use_stratified_cv_split", default=self.use_stratified_cv_split_default, type=to_bool, choices=[True, False]), + # specify minimum budget for cv. If budget is smaller only evaluate a single fold. + ConfigOption("min_budget_for_cv", default=0, type=float), + # incorporate number of cv splits in budget: Use half the number of specified cv splits below given budget. 
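+ # (whenever fewer folds than configured are evaluated, a fixed loss penalty of 1000 is added to the averaged loss)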
+ ConfigOption("half_num_cv_splits_below_budget", default=0, type=float), + # shuffle train and validation set + ConfigOption('shuffle', default=True, type=to_bool, choices=[True, False]), + ] + return options + + def split_cv(self, X_shape, split, max_splits): + split_size = X_shape[0] / max_splits + i1 = int(split*split_size) + i2 = int((split+1)*split_size) + + train_indices = list(range(0, i1)) + list(range(i2, X_shape[0])) + valid_indices = list(range(i1, i2)) + + return train_indices, valid_indices + + def concat(self, upper, lower): + if (scipy.sparse.issparse(upper)): + return scipy.sparse.vstack([upper, lower]) + else: + return np.concatenate([upper, lower]) + + def shuffle_indices(self, indices, shuffle): + if shuffle: + np.random.shuffle(indices) + return indices + + + def clean_fit_data(self): + super(CrossValidationIndices, self).clean_fit_data() + self.sub_pipeline.root.clean_fit_data() + + def incorporate_num_cv_splits_in_budget(self, budget, pipeline_config, cv_splits): + budget_too_low_for_cv = budget < pipeline_config["min_budget_for_cv"] and cv_splits > 1 + half_num_cv_splits = not budget_too_low_for_cv and budget < pipeline_config["half_num_cv_splits_below_budget"] and cv_splits > 1 + + if budget_too_low_for_cv: + self.logger.debug("Only evaluate a single fold of CV, since the budget is lower than the min_budget for cv") + return True, cv_splits, 1000 + + if half_num_cv_splits: + self.logger.debug("Using half number of cv splits since budget is lower than the budget you specified for half number of cv splits") + return False, int(math.ceil(cv_splits / 2)), 1000 + + return False, cv_splits, 0 diff --git a/autoPyTorch/pipeline/nodes/image/image_augmentation.py b/autoPyTorch/pipeline/nodes/image/image_augmentation.py new file mode 100644 index 000000000..26eafd369 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/image_augmentation.py @@ -0,0 +1,221 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import inspect +import logging +import numpy as np + +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper + +import torch +from torchvision import datasets, models, transforms +from autoPyTorch.components.preprocessing.image_preprocessing.transforms import Cutout, AutoAugment, FastAutoAugment + + +import time +from autoPyTorch.data_management.image_loader import ThreadCounter +class TimeCompose(object): + """Composes several transforms together. + Args: + transforms (list of ``Transform`` objects): list of transforms to compose. 
+ Example: + >>> transforms.Compose([ + >>> transforms.CenterCrop(10), + >>> transforms.ToTensor(), + >>> ]) + """ + + def __init__(self, transforms): + self.transforms = transforms + self.counters = [ThreadCounter() for _ in transforms] + + def __call__(self, img): + for i, t in enumerate(self.transforms): + start_time = time.time() + img = t(img) + self.counters[i].add(time.time() - start_time) + return img + + def get_times(self): + return {str(t): self.counters[i].value() for i, t in enumerate(self.transforms) } + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + +class ImageAugmentation(PipelineNode): + def __init__(self): + super(ImageAugmentation, self).__init__() + self.mean_std_cache = dict() + + def fit(self, pipeline_config, hyperparameter_config, dataset_info, X, Y, train_indices, valid_indices): + mean, std = self.compute_mean_std(pipeline_config, hyperparameter_config, X, Y, train_indices, dataset_info) #dataset_info.mean, dataset_info.std + + hyperparameter_config = ConfigWrapper(self.get_name(), hyperparameter_config) + + transform_list = [] + image_size = min(dataset_info.x_shape[-2], dataset_info.x_shape[-1]) + + if len(X.shape) > 1: + transform_list.append(transforms.ToPILImage()) + + if hyperparameter_config['augment']: + if hyperparameter_config['fastautoaugment'] and hyperparameter_config['autoaugment']: + # fast autoaugment and autoaugment + transform_list.extend([ + FastAutoAugment(), + AutoAugment(), + transforms.Resize(image_size), + transforms.RandomCrop(image_size, padding=4), + transforms.RandomHorizontalFlip() + ]) + elif hyperparameter_config['fastautoaugment']: + # fast autoaugment + transform_list.extend([ + FastAutoAugment(), + transforms.Resize(image_size), + transforms.RandomCrop(image_size, padding=4), + transforms.RandomHorizontalFlip() + ]) + elif hyperparameter_config['autoaugment']: + # autoaugment + transform_list.extend([ + AutoAugment(), + transforms.Resize(image_size), + transforms.RandomCrop(image_size, padding=4), + transforms.RandomHorizontalFlip() + ]) + else: + # default augment color, rotation, size + transform_list.extend([ + transforms.ColorJitter(brightness=0.196, saturation=0.196, hue=0.141), + transforms.RandomAffine(degrees=10, shear=0.1, fillcolor=127), + transforms.RandomResizedCrop(image_size, scale=(0.533, 1), ratio=(0.75, 1.25)), + transforms.RandomHorizontalFlip() + ]) + else: + transform_list.extend([ + transforms.Resize(image_size), + transforms.CenterCrop(image_size), + ]) + + + # grayscale if only one channel + if dataset_info.x_shape[1] == 1: + transform_list.append(transforms.Grayscale(1)) + + # normalize + transform_list.append(transforms.ToTensor()) + transform_list.append(transforms.Normalize(mean, std)) + + # cutout + if hyperparameter_config['cutout']: + n_holes = hyperparameter_config['cutout_holes'] + transform_list.append(Cutout(n_holes=1, length=hyperparameter_config['length'], probability=0.5)) + + + train_transform = transforms.Compose(transform_list) + + transform_list = [] + if len(X.shape) > 1: + transform_list.append(transforms.ToPILImage()) + + transform_list.extend([ + transforms.Resize(image_size), + transforms.CenterCrop(image_size), + transforms.ToTensor(), + transforms.Normalize(mean, std), + ]) + valid_transform = transforms.Compose([transforms.Grayscale(1)] + transform_list if dataset_info.x_shape[1] == 1 else transform_list) + + return { 
'train_transform': train_transform, 'valid_transform': valid_transform, 'mean': mean, 'std': std } + + def predict(self, pipeline_config, mean, std): + + predict_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize(mean, std), + ]) + + return {'predict_transform': predict_transform} + + def get_hyperparameter_search_space(self, **pipeline_config): + import ConfigSpace as CS + import ConfigSpace.hyperparameters as CSH + cs = CS.ConfigurationSpace() + + augment = cs.add_hyperparameter(CSH.CategoricalHyperparameter('augment', [True, False])) + autoaugment = cs.add_hyperparameter(CSH.CategoricalHyperparameter('autoaugment', [True, False])) + fastautoaugment = cs.add_hyperparameter(CSH.CategoricalHyperparameter('fastautoaugment', [True, False])) + + cutout = cs.add_hyperparameter(CSH.CategoricalHyperparameter('cutout', [True, False])) + cutout_length = cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('length', lower=0, upper=20, log=False)) + cutout_holes = cs.add_hyperparameter(CSH.UniformIntegerHyperparameter('cutout_holes', lower=1, upper=3, log=False)) + + cs.add_condition(CS.EqualsCondition(cutout_length, cutout, True)) + cs.add_condition(CS.EqualsCondition(cutout_holes, cutout, True)) + + cs.add_condition(CS.EqualsCondition(autoaugment, augment, True)) + cs.add_condition(CS.EqualsCondition(fastautoaugment, augment, True)) + + return cs + + def compute_mean_std(self, pipeline_config, hyperparameter_config, X, Y, train_indices, dataset_info): + log = logging.getLogger('autonet') + + if dataset_info.path in self.mean_std_cache: + mean, std = self.mean_std_cache[dataset_info.path] + log.debug('CACHED: MEAN: ' + str(mean) + ' -- STD: ' + str(std)) + return mean, std + + from autoPyTorch.pipeline.nodes.image.create_image_dataloader import CreateImageDataLoader + loader = CreateImageDataLoader() + + image_size = min(dataset_info.x_shape[-2], dataset_info.x_shape[-1]) + transform_list = [] + if len(X.shape) > 1: + transform_list.append(transforms.ToPILImage()) + transform_list.append(transforms.Resize(image_size)) + transform_list.append(transforms.CenterCrop(image_size)) + if dataset_info.x_shape[1] == 1: + transform_list.append(transforms.Grayscale(1)) + transform_list.append(transforms.ToTensor()) + train_transform = transforms.Compose(transform_list) + + cache_size = pipeline_config['dataloader_cache_size_mb'] + pipeline_config['dataloader_cache_size_mb'] = 0 + train_loader = loader.fit(pipeline_config, hyperparameter_config, X, Y, train_indices, None, train_transform, None, dataset_info)['train_loader'] + pipeline_config['dataloader_cache_size_mb'] = cache_size + + mean = 0. + std = 0. + nb_samples = 0. 
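+ # Accumulate per-image channel means and stds over the whole training loader, then divide by the sample count to obtain the normalization statistics.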
+ + with torch.no_grad(): + for data, _ in train_loader: + + # import matplotlib.pyplot as plt + # img = plt.imshow(data.numpy()[0,1,:]) + # plt.show() + + batch_samples = data.size(0) + data = data.view(batch_samples, data.size(1), -1) + mean = mean + data.mean(2).sum(0) + std = std + data.std(2).sum(0) + nb_samples += batch_samples + + mean /= nb_samples + std /= nb_samples + + mean, std = mean.numpy().tolist(), std.numpy().tolist() + + log.debug('MEAN: ' + str(mean) + ' -- STD: ' + str(std)) + + self.mean_std_cache[dataset_info.path] = [mean, std] + return mean, std diff --git a/autoPyTorch/pipeline/nodes/image/image_dataset_reader.py b/autoPyTorch/pipeline/nodes/image/image_dataset_reader.py new file mode 100644 index 000000000..85156d36a --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/image_dataset_reader.py @@ -0,0 +1,57 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import os +import numpy as np +import math + +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.utils.config.config_file_parser import ConfigFileParser + +from autoPyTorch.data_management.data_manager import ImageManager + +class ImageDatasetReader(PipelineNode): + def __init__(self): + super(ImageDatasetReader, self).__init__() + + def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid): + + if len(X_train.shape)==1 and len(X_train)==1: + X_train = X_train[0] + Y_train = 0 + + if X_valid is not None: + if len(X_valid.shape)==1 and len(X_valid)==1: + X_valid = X_valid[0] + Y_valid = None + + X_train, Y_train, path = self.read_data(X_train, Y_train) + X_valid, Y_valid, _ = self.read_data(X_valid, Y_valid) + + return { 'X_train': X_train, 'Y_train': Y_train, 'X_valid': X_valid, 'Y_valid': Y_valid, 'dataset_path': path } + + def get_pipeline_config_options(self): + options = [ + ] + return options + + def read_data(self, path, y): + if path is None: + return None, None, None + + if not isinstance(path, str): + return path, y, str(path)[0:300] + + if not os.path.isabs(path): + path = os.path.abspath(os.path.join(ConfigFileParser.get_autonet_home(), path)) + + if not os.path.exists(path): + raise ValueError('Path ' + str(path) + ' is not a valid path.') + + im = ImageManager() + im.read_data(path, is_classification=True) + + return im.X_train, im.Y_train, path diff --git a/autoPyTorch/pipeline/nodes/image/loss_module_selector_indices.py b/autoPyTorch/pipeline/nodes/image/loss_module_selector_indices.py new file mode 100644 index 000000000..be9741cb0 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/loss_module_selector_indices.py @@ -0,0 +1,35 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import inspect +import numpy as np + +import ConfigSpace +import ConfigSpace.hyperparameters as CSH + +from autoPyTorch.pipeline.nodes.loss_module_selector import LossModuleSelector +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper +from autoPyTorch.utils.config.config_option import ConfigOption + + +class LossModuleSelectorIndices(LossModuleSelector): + def fit(self, hyperparameter_config, pipeline_config, X, Y, train_indices, dataset_info): + + if Y.shape[0] == dataset_info.y_shape[0]: + return super(LossModuleSelectorIndices, self).fit(hyperparameter_config, pipeline_config, X=np.zeros((Y.shape[0], 1)), Y=Y, train_indices=train_indices) + + print(Y.shape[0], dataset_info.y_shape[0]) + + hyperparameter_config = 
ConfigWrapper(self.get_name(), hyperparameter_config) + + loss_module_name = hyperparameter_config["loss_module"] + loss_module = self.loss_modules[loss_module_name] + loss = loss_module.module + if inspect.isclass(loss): + loss = loss() + loss_module.set_loss_function(loss) + + return {'loss_function': loss_module} + + diff --git a/autoPyTorch/pipeline/nodes/image/multiple_datasets.py b/autoPyTorch/pipeline/nodes/image/multiple_datasets.py new file mode 100644 index 000000000..80ed233b1 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/multiple_datasets.py @@ -0,0 +1,115 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + +import os +import numpy as np +import math +import time +import pandas as pd +import logging +import random +import torch + +from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.utils.config.config_file_parser import ConfigFileParser + +class MultipleDatasets(SubPipelineNode): + + def __init__(self, sub_pipeline_nodes): + super(MultipleDatasets, self).__init__(sub_pipeline_nodes) + + self.logger = logging.getLogger('autonet') + + + def fit(self, hyperparameter_config, pipeline_config, X_train, Y_train, X_valid, Y_valid, budget, budget_type, config_id, working_directory): + if len(X_train.shape) > 1: + return self.sub_pipeline.fit_pipeline( hyperparameter_config=hyperparameter_config, + pipeline_config=pipeline_config, + X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid, + budget=budget, budget_type=budget_type, config_id=config_id, working_directory=working_directory) + + + max_datasets = X_train.shape[0] + max_steps = math.floor((math.log(pipeline_config['max_budget']) - math.log(pipeline_config['min_budget'])) / math.log(pipeline_config['eta'])) + current_step = max_steps - math.floor((math.log(pipeline_config['max_budget']) - math.log(budget)) / math.log(pipeline_config['eta'])) if budget > 1e-10 else 0 + n_datasets = math.floor(math.pow(max_datasets, current_step/max(1, max_steps)) + 1e-10) + + # refit can cause issues with different budget + if max_steps == 0 or n_datasets > max_datasets or not pipeline_config['increase_number_of_trained_datasets']: + n_datasets = max_datasets + + if X_valid is None or Y_valid is None: + X_valid = [None] * n_datasets + Y_valid = [None] * n_datasets + + if 'use_tensorboard_logger' in pipeline_config and pipeline_config['use_tensorboard_logger']: + import tensorboard_logger as tl + tl.log_value('Train/datasets', float(n_datasets), int(time.time())) + + infos = [] + loss = 0 + losses = [] + + self.logger.debug('Start fitting ' + str(n_datasets) + ' dataset(s). 
Current budget: ' + str(budget) + ' - Step: ' + str(current_step) + '/' + str(max_steps)) + + #dataset_order = list(range(n_datasets)) + #random.shuffle(dataset_order) + #if pipeline_config['dataset_order'] and len(pipeline_config['dataset_order']) == n_datasets: + # dataset_order = pipeline_config['dataset_order'] + # dataset_order = [i for i in dataset_order if i < n_datasets] + #X_train = X_train[dataset_order] + if np.any(pipeline_config['dataset_order']): + dataset_order = pipeline_config['dataset_order'] + else: + dataset_order = list(range(n_datasets)) + X_train = X_train[dataset_order] + + for dataset in range(n_datasets): + self.logger.info('Fit dataset (' + str(dataset+1) + '/' + str(n_datasets) + '): ' + str(X_train[dataset]) + ' for ' + str(round(budget / n_datasets)) + 's') + + result = self.sub_pipeline.fit_pipeline(hyperparameter_config=hyperparameter_config, + pipeline_config=pipeline_config, + X_train=X_train[dataset], Y_train=Y_train[dataset], X_valid=X_valid[dataset], Y_valid=Y_valid[dataset], + budget=budget / n_datasets, budget_type=budget_type, config_id=config_id, working_directory=working_directory) + + # copy/rename checkpoint - save one checkpoint for each trained dataset + if 'checkpoint' in result['info']: + src = result['info']['checkpoint'] + folder, file = os.path.split(src) + dest = os.path.join(folder, os.path.splitext(file)[0] + '_' + str(dataset) + '.pt') + import shutil + if dataset < n_datasets - 1: + shutil.copy(src, dest) + else: + os.rename(src, dest) + result['info']['checkpoint'] = dest + + result['info']['dataset_path'] = str(X_train[dataset]) + result['info']['dataset_id'] = dataset_order[dataset] + + infos.append(result['info']) + loss += result['loss'] + losses.append(result['loss']) + + if 'use_tensorboard_logger' in pipeline_config and pipeline_config['use_tensorboard_logger']: + import tensorboard_logger as tl + tl.log_value('Train/datasets', float(n_datasets), int(time.time())) + + loss = loss / n_datasets + + return {'loss': loss, 'losses': losses, 'info': infos} + + def predict(self, pipeline_config, X): + return self.sub_pipeline.predict_pipeline(pipeline_config=pipeline_config, X=X) + + def get_pipeline_config_options(self): + options = [ + ConfigOption('dataset_order', default=None, type=int, list=True), + + #autonet.refit sets this to false to avoid refit budget issues + ConfigOption('increase_number_of_trained_datasets', default=True, type=to_bool) + ] + return options diff --git a/autoPyTorch/pipeline/nodes/image/network_selector_datasetinfo.py b/autoPyTorch/pipeline/nodes/image/network_selector_datasetinfo.py new file mode 100644 index 000000000..4adf11c4b --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/network_selector_datasetinfo.py @@ -0,0 +1,35 @@ +__author__ = "Michael Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + + +from autoPyTorch.pipeline.nodes.network_selector import NetworkSelector +from autoPyTorch.components.networks.base_net import BaseNet + +import torch.nn as nn +import ConfigSpace +import ConfigSpace.hyperparameters as CSH +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper +from autoPyTorch.utils.config.config_option import ConfigOption +import torchvision.models as models + +class NetworkSelectorDatasetInfo(NetworkSelector): + def fit(self, hyperparameter_config, pipeline_config, dataset_info): + config = ConfigWrapper(self.get_name(), hyperparameter_config) + network_name = config['network'] + + network_type = self.networks[network_name] + network_config = ConfigWrapper(network_name, 
config) + activation = self.final_activations[pipeline_config["final_activation"]] + + in_features = dataset_info.x_shape[1:] + if len(in_features) == 1: + # feature data - otherwise image data (channels, width, height) + in_features = in_features[0] + + network = network_type( config=network_config, + in_features=in_features, out_features=dataset_info.y_shape[1], + final_activation=activation) + + # self.logger.debug('NETWORK:\n' + str(network)) + return {'network': network} diff --git a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py new file mode 100644 index 000000000..fd7c01165 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py @@ -0,0 +1,365 @@ + +import numpy as np +import os +import time +import shutil +import netifaces +import traceback +import logging +import itertools +import random + + +import autoPyTorch.utils.thread_read_write as thread_read_write +import datetime + +from hpbandster.core.nameserver import NameServer, nic_name_to_host +from hpbandster.core.result import (json_result_logger, + logged_results_to_HBS_result) + +from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode +from autoPyTorch.pipeline.base.pipeline import Pipeline +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool + +from autoPyTorch.core.hpbandster_extensions.bohb_ext import BOHBExt +from autoPyTorch.core.hpbandster_extensions.bohb_multi_kde_ext import BOHBMultiKDEExt +from autoPyTorch.core.hpbandster_extensions.hyperband_ext import HyperBandExt +from autoPyTorch.core.worker_no_timelimit import ModuleWorkerNoTimeLimit + +from autoPyTorch.components.training.image.budget_types import BudgetTypeTime, BudgetTypeEpochs +import copy + +from autoPyTorch.utils.modify_config_space import remove_constant_hyperparameter + +from autoPyTorch.utils.loggers import combined_logger, bohb_logger, tensorboard_logger + +import pprint + +tensorboard_logger_configured = False + +class OptimizationAlgorithmNoTimeLimit(SubPipelineNode): + def __init__(self, optimization_pipeline_nodes): + """OptimizationAlgorithm pipeline node. 
+ It will run either the optimization algorithm (BOHB, Hyperband - defined in config) or start workers + Each worker will run the provided optimization_pipeline and will return the output + of the pipeline_result_node to the optimization algorithm + + Train: + The optimization_pipeline will get the following inputs: + {hyperparameter_config, pipeline_config, X_train, Y_train, X_valid, Y_valid, budget, budget_type} + The pipeline_result_node has to provide the following outputs: + - 'loss': the optimization value (minimize) + - 'info': dict containing info for the respective training process + + Predict: + The optimization_pipeline will get the following inputs: + {pipeline_config, X} + The pipeline_result_node has to provide the following outputs: + - 'Y': result of prediction for 'X' + Note: predict will not call the optimization algorithm + + Arguments: + optimization_pipeline {Pipeline} -- pipeline that will be optimized (hyperparameters) + pipeline_result_node {PipelineNode} -- pipeline node that provides the results of the optimization_pipeline + """ + + super(OptimizationAlgorithmNoTimeLimit, self).__init__(optimization_pipeline_nodes) + + self.algorithms = dict() + self.algorithms["bohb"] = BOHBExt + self.algorithms["hyperband"] = HyperBandExt + self.algorithms["bohb_multi_kde"] = BOHBMultiKDEExt + + self.logger = logging.getLogger('autonet') + + self.n_datasets=1 + + def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, refit=None): + res = None + + + config_space = self.pipeline.get_hyperparameter_search_space(**pipeline_config) + config_space, constants = remove_constant_hyperparameter(config_space) + config_space.seed(pipeline_config['random_seed']) + + self.n_datasets = X_train.shape[0] if X_train.shape[0]<100 else 1 + + # Get number of budgets + max_budget = pipeline_config["max_budget"] + min_budget = pipeline_config["min_budget"] + eta = pipeline_config["eta"] + max_SH_iter = -int(np.log(min_budget/max_budget)/np.log(eta)) + 1 + budgets = max_budget * np.power(eta, -np.linspace(max_SH_iter-1, 0, max_SH_iter)) + n_budgets = len(budgets) + + # Get permutations + self.permutations = self.get_permutations(n_budgets) + + self.logger.info('BOHB-ConfigSpace:\n' + str(config_space)) + self.logger.info('Constant Hyperparameter:\n' + str(pprint.pformat(constants))) + + run_id, task_id = pipeline_config['run_id'], pipeline_config['task_id'] + + + global tensorboard_logger_configured + if pipeline_config['use_tensorboard_logger'] and not tensorboard_logger_configured: + import tensorboard_logger as tl + directory = os.path.join(pipeline_config['result_logger_dir'], "worker_logs_" + str(task_id)) + os.makedirs(directory, exist_ok=True) + tl.configure(directory, flush_secs=60) + tensorboard_logger_configured = True + + if (refit is not None): + return self.run_refit(pipeline_config, refit, constants, X_train, Y_train, X_valid, Y_valid) + + try: + ns_credentials_dir, tmp_models_dir, network_interface_name = self.prepare_environment(pipeline_config) + + # start nameserver if not on cluster or on master node in cluster + if task_id in [1, -1]: + NS = self.get_nameserver(run_id, task_id, ns_credentials_dir, network_interface_name) + ns_host, ns_port = NS.start() + + self.run_worker(pipeline_config=pipeline_config, run_id=run_id, task_id=task_id, ns_credentials_dir=ns_credentials_dir, + network_interface_name=network_interface_name, X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid, + constant_hyperparameter=constants) + + # start BOHB if not on cluster or on master
node in cluster + if task_id in [1, -1]: + self.run_optimization_algorithm(pipeline_config, config_space, constants, run_id, ns_host, ns_port, NS, task_id) + + + res = self.parse_results(pipeline_config["result_logger_dir"]) + + except Exception as e: + print(e) + traceback.print_exc() + finally: + self.clean_up(pipeline_config, ns_credentials_dir, tmp_models_dir) + + if (res): + return {'loss': res[0], 'optimized_hyperparameter_config': res[1], 'budget': res[2], 'info': dict()} + else: + return {'optimized_hyperparameter_config': dict(), 'budget': 0, 'loss': float('inf'), 'info': dict()} + + def predict(self, pipeline_config, X): + return self.sub_pipeline.predict_pipeline(pipeline_config=pipeline_config, X=X) + + def get_pipeline_config_options(self): + options = [ + ConfigOption("run_id", default="0", type=str, info="Unique id for each run."), + ConfigOption("task_id", default=-1, type=int, info="ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. "), + ConfigOption("algorithm", default="bohb", type=str, choices=list(self.algorithms.keys())), + ConfigOption("budget_type", default="time", type=str, choices=['time', 'epochs']), + ConfigOption("min_budget", default=lambda c: 120 if c['budget_type'] == 'time' else 5, type=float, depends=True), + ConfigOption("max_budget", default=lambda c: 6000 if c['budget_type'] == 'time' else 150, type=float, depends=True), + ConfigOption("max_runtime", + default=lambda c: ((-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) * c["max_budget"]) + if c["budget_type"] == "time" else float("inf"), + type=float, depends=True), + ConfigOption("num_iterations", + default=lambda c: (-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) + if c["budget_type"] == "epochs" else float("inf"), + type=float, depends=True), + ConfigOption("eta", default=3, type=float, info='eta parameter of Hyperband.'), + ConfigOption("min_workers", default=1, type=int), + ConfigOption("working_dir", default=".", type="directory"), + ConfigOption("network_interface_name", default=self.get_default_network_interface_name(), type=str), + ConfigOption("memory_limit_mb", default=1000000, type=int), + ConfigOption("result_logger_dir", default=".", type="directory"), + ConfigOption("use_tensorboard_logger", default=False, type=to_bool), + ConfigOption("keep_only_incumbent_checkpoints", default=True, type=to_bool), + ConfigOption("global_results_dir", default=None, type='directory'), + ] + return options + + def get_default_network_interface_name(self): + try: + return netifaces.gateways()['default'][netifaces.AF_INET][1] + except: + return 'lo' + + def prepare_environment(self, pipeline_config): + if not os.path.exists(pipeline_config["working_dir"]) and pipeline_config['task_id'] in [1, -1]: + try: + os.mkdir(pipeline_config["working_dir"]) + except: + pass + tmp_models_dir = os.path.join(pipeline_config["working_dir"], "tmp_models_" + str(pipeline_config['run_id'])) + ns_credentials_dir = os.path.abspath(os.path.join(pipeline_config["working_dir"], "ns_credentials_" + str(pipeline_config['run_id']))) + network_interface_name = pipeline_config["network_interface_name"] or (netifaces.interfaces()[1] if len(netifaces.interfaces()) > 1 else "lo") + + if os.path.exists(tmp_models_dir) and pipeline_config['task_id'] in [1, -1]: + shutil.rmtree(tmp_models_dir) + if os.path.exists(ns_credentials_dir) and pipeline_config['task_id'] in [1, -1]: + shutil.rmtree(ns_credentials_dir) + return ns_credentials_dir, tmp_models_dir, 
network_interface_name + + def clean_up(self, pipeline_config, tmp_models_dir, ns_credentials_dir): + if pipeline_config['task_id'] in [1, -1]: + # Delete temporary files + if os.path.exists(tmp_models_dir): + shutil.rmtree(tmp_models_dir) + if os.path.exists(ns_credentials_dir): + shutil.rmtree(ns_credentials_dir) + + def get_nameserver(self, run_id, task_id, ns_credentials_dir, network_interface_name): + if not os.path.isdir(ns_credentials_dir): + try: + os.mkdir(ns_credentials_dir) + except: + pass + return NameServer(run_id=run_id, nic_name=network_interface_name, working_directory=ns_credentials_dir) + + def get_optimization_algorithm_instance(self, config_space, run_id, pipeline_config, ns_host, ns_port, result_logger, previous_result=None): + optimization_algorithm = self.algorithms[pipeline_config["algorithm"]] + + if pipeline_config["algorithm"]=="bohb_multi_kde": + hb = optimization_algorithm(configspace=config_space, run_id = run_id, + eta=pipeline_config["eta"], min_budget=pipeline_config["min_budget"], max_budget=pipeline_config["max_budget"], + host=ns_host, nameserver=ns_host, nameserver_port=ns_port, + result_logger=result_logger, + ping_interval=10**6, + working_directory=pipeline_config["working_dir"], + previous_result=previous_result, + n_kdes=self.n_datasets, + permutations=self.permutations) + else: + hb = optimization_algorithm(configspace=config_space, run_id = run_id, + eta=pipeline_config["eta"], min_budget=pipeline_config["min_budget"], max_budget=pipeline_config["max_budget"], + host=ns_host, nameserver=ns_host, nameserver_port=ns_port, + result_logger=result_logger, + ping_interval=10**6, + working_directory=pipeline_config["working_dir"], + previous_result=previous_result) + return hb + + + def parse_results(self, result_logger_dir): + res = logged_results_to_HBS_result(result_logger_dir) + id2config = res.get_id2config_mapping() + incumbent_trajectory = res.get_incumbent_trajectory(bigger_is_better=False, non_decreasing_budget=False) + + if (len(incumbent_trajectory['config_ids']) == 0): + return dict() + + final_config_id = incumbent_trajectory['config_ids'][-1] + return incumbent_trajectory['losses'][-1], id2config[final_config_id]['config'], incumbent_trajectory['budgets'][-1] + + + def run_worker(self, pipeline_config, constant_hyperparameter, run_id, task_id, ns_credentials_dir, network_interface_name, + X_train, Y_train, X_valid, Y_valid): + if not task_id == -1: + time.sleep(5) + while not os.path.isdir(ns_credentials_dir): + time.sleep(5) + host = nic_name_to_host(network_interface_name) + + worker = ModuleWorkerNoTimeLimit( pipeline=self.sub_pipeline, pipeline_config=pipeline_config, + constant_hyperparameter=constant_hyperparameter, + X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid, + budget_type=pipeline_config['budget_type'], + max_budget=pipeline_config["max_budget"], + host=host, run_id=run_id, + id=task_id, + working_directory=pipeline_config["result_logger_dir"], + permutations=self.permutations) + worker.load_nameserver_credentials(ns_credentials_dir) + # run in background if not on cluster + worker.run(background=(task_id <= 1)) + + + def run_optimization_algorithm(self, pipeline_config, config_space, constant_hyperparameter, run_id, ns_host, ns_port, nameserver, task_id): + self.logger.info("[AutoNet] Start " + pipeline_config["algorithm"]) + + # initialize optimization algorithm + + result_logger = self.get_result_logger(pipeline_config, constant_hyperparameter) + HB = 
self.get_optimization_algorithm_instance(config_space=config_space, run_id=run_id, + pipeline_config=pipeline_config, ns_host=ns_host, ns_port=ns_port, result_logger=result_logger) + + # start algorithm + min_num_workers = pipeline_config["min_workers"] if task_id != -1 else 1 + + reduce_runtime = pipeline_config["max_budget"] if pipeline_config["budget_type"] == "time" else 0 + + HB.wait_for_workers(min_num_workers) + self.logger.info('Workers are ready!') + + thread_read_write.append('runs.log', "{0}: {1} | {2}-{3}\n".format( + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), + run_id, + pipeline_config['min_budget'], + pipeline_config['max_budget'])) + + HB.run_until(runtime=(pipeline_config["max_runtime"] - reduce_runtime), + n_iterations=pipeline_config["num_iterations"], + min_n_workers=min_num_workers) + + HB.shutdown(shutdown_workers=True) + nameserver.shutdown() + + + def clean_fit_data(self): + super(OptimizationAlgorithmNoTimeLimit, self).clean_fit_data() + self.sub_pipeline.root.clean_fit_data() + + def run_refit(self, pipeline_config, refit, constants, X_train, Y_train, X_valid, Y_valid): + start_time = time.time() + + result_logger = self.get_result_logger(pipeline_config, constants) + result_logger.new_config((0, 0, 0), refit["hyperparameter_config"], {'model_based_pick': False}) + + full_config = dict() + full_config.update(constants) + full_config.update(refit["hyperparameter_config"]) + + self.logger.info('Refit-Config:\n' + str(pprint.pformat(full_config))) + + class Job(): + pass + job = Job() + job.id = (0, 0, 0) + job.kwargs = { + 'budget': refit['budget'], + 'config': refit["hyperparameter_config"], + } + + try: + res = self.sub_pipeline.fit_pipeline( + hyperparameter_config=full_config, pipeline_config=pipeline_config, + X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid, + budget=refit["budget"], budget_type=pipeline_config['budget_type'], config_id='refit', working_directory=pipeline_config['result_logger_dir']) + job.exception = None + except Exception as e: + self.logger.exception('Exception during refit') + res = None + job.exception = str(e) + + end_time = time.time() + + job.timestamps = {'submitted': start_time, 'started': start_time, 'finished': end_time} + job.result = res + + result_logger(job) + + return {'loss': res['loss'] if res else float('inf'), + 'optimized_hyperparameter_config': full_config, + 'budget': refit['budget'], + 'info': res['info'] if res else dict()} + + def get_result_logger(self, pipeline_config, constant_hyperparameter): + loggers = [bohb_logger(constant_hyperparameter=constant_hyperparameter, directory=pipeline_config["result_logger_dir"], overwrite=True)] + if pipeline_config['use_tensorboard_logger']: + loggers.append(tensorboard_logger(pipeline_config, constant_hyperparameter, pipeline_config['global_results_dir'])) + return combined_logger(*loggers) + + def get_permutations(self, n_budgets=1): + # Get permutations, since HB fits like this: b1 - b2 -b3 - b2 -b3, repeat them accordingly + idx = [i for i in range(self.n_datasets)] + permutations = np.array(list(itertools.permutations(idx))) + ret = [] + for perm in permutations: + for ind in range(n_budgets): + ret.append(perm) + return np.array(ret) diff --git a/autoPyTorch/pipeline/nodes/image/simple_scheduler_selector.py b/autoPyTorch/pipeline/nodes/image/simple_scheduler_selector.py new file mode 100644 index 000000000..59de24aa1 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/simple_scheduler_selector.py @@ -0,0 +1,26 @@ +__author__ = "Michael 
Burkart" +__version__ = "0.0.1" +__license__ = "BSD" + + +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode + +from autoPyTorch.pipeline.nodes.lr_scheduler_selector import LearningrateSchedulerSelector + +import ConfigSpace +import ConfigSpace.hyperparameters as CSH +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper +from autoPyTorch.utils.config.config_option import ConfigOption + +class SimpleLearningrateSchedulerSelector(LearningrateSchedulerSelector): + + def fit(self, hyperparameter_config, pipeline_config, optimizer): + config = ConfigWrapper(self.get_name(), hyperparameter_config) + + scheduler_name = config['lr_scheduler'] + + lr_scheduler_type = self.lr_scheduler[scheduler_name] + lr_scheduler_config = ConfigWrapper(scheduler_name, config) + lr_scheduler = lr_scheduler_type(optimizer, lr_scheduler_config) + + return {'lr_scheduler': lr_scheduler} \ No newline at end of file diff --git a/autoPyTorch/pipeline/nodes/image/simple_train_node.py b/autoPyTorch/pipeline/nodes/image/simple_train_node.py new file mode 100644 index 000000000..6ccddbdeb --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/simple_train_node.py @@ -0,0 +1,348 @@ +__author__ = "Max Dippel, Michael Burkart and Matthias Urban" +__version__ = "0.0.1" +__license__ = "BSD" + +import torch +import time +import logging + +import os, pprint +import scipy.sparse +import numpy as np +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader, TensorDataset + +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode + +import ConfigSpace +import ConfigSpace.hyperparameters as CSH +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.components.training.image.base_training import BaseTrainingTechnique, BaseBatchLossComputationTechnique + +from autoPyTorch.components.training.image.trainer import Trainer +from autoPyTorch.components.training.image.checkpoints.save_load import save_checkpoint, load_checkpoint, get_checkpoint_dir +from autoPyTorch.components.training.image.checkpoints.load_specific import load_model #, load_optimizer, load_scheduler + +torch.backends.cudnn.benchmark = True + +import signal + +class SimpleTrainNode(PipelineNode): + def __init__(self): + super(SimpleTrainNode, self).__init__() + self.default_minimize_value = True + self.logger = logging.getLogger('autonet') + self.training_techniques = dict() + self.batch_loss_computation_techniques = dict() + self.add_batch_loss_computation_technique("standard", BaseBatchLossComputationTechnique) + + def fit(self, hyperparameter_config, pipeline_config, + train_loader, valid_loader, + network, optimizer, lr_scheduler, + optimize_metric, additional_metrics, + log_functions, + budget, + loss_function, + budget_type, + config_id, working_directory, + train_indices, valid_indices): + + + if budget < 1e-5: + return {'loss': float('inf') if pipeline_config["minimize"] else -float('inf'), 'info': dict()} + + training_start_time = time.time() + # prepare + if not torch.cuda.is_available(): + pipeline_config["cuda"] = False + + device = torch.device('cuda' if pipeline_config['cuda'] else 'cpu') + + checkpoint_path = get_checkpoint_dir(working_directory) + checkpoint = None + if pipeline_config['save_checkpoints']: + checkpoint = load_checkpoint(checkpoint_path, config_id, budget) + + network = load_model(network, checkpoint) + + tensorboard_logging = 'use_tensorboard_logger' in 
pipeline_config and pipeline_config['use_tensorboard_logger'] + + # from torch.optim import SGD + # optimizer = SGD(network.parameters(), lr=0.3) + + # optimizer = load_optimizer(optimizer, checkpoint, device) + # lr_scheduler = load_scheduler(lr_scheduler, checkpoint) + + hyperparameter_config = ConfigWrapper(self.get_name(), hyperparameter_config) + + batch_loss_name = hyperparameter_config["batch_loss_computation_technique"] if "batch_loss_computation_technique" in hyperparameter_config else pipeline_config["batch_loss_computation_techniques"][0] + + batch_loss_computation_technique = self.batch_loss_computation_techniques[batch_loss_name]() + batch_loss_computation_technique.set_up( + pipeline_config, ConfigWrapper(batch_loss_name, hyperparameter_config), self.logger) + + + # Training loop + logs = [] + epoch = 0 + + optimize_metrics = [] + val_metrics = [optimize_metric] + additional_metrics + if pipeline_config['evaluate_on_train_data']: + optimize_metrics = val_metrics + elif valid_loader is None: + self.logger.warning('No valid data specified and train process should not evaluate on train data! Will ignore \"evaluate_on_train_data\" and evaluate on train data!') + optimize_metrics = val_metrics + + trainer = Trainer( + model=network, + loss_computation=batch_loss_computation_technique, + criterion=loss_function, + budget=budget, + optimizer=optimizer, + scheduler=lr_scheduler, + budget_type=budget_type, + device=device, + config_id=config_id, + checkpoint_path=checkpoint_path if pipeline_config['save_checkpoints'] else None, + images_to_plot=tensorboard_logging * pipeline_config['tensorboard_images_count']) + + model_params = self.count_parameters(network) + + start_up = time.time() - training_start_time + epoch_train_time = 0 + val_time = 0 + log_time = 0 + + # tmp = time.time() + # for _ in range(100): + # for _ in train_loader: + # pass + # time_used = time.time() - tmp + # self.logger.debug("Test time: " + str(time_used) + "s : \n" + str(pprint.pformat(train_loader.dataset.get_times('train_')))) + + self.logger.debug("Start train. Budget: " + str(budget)) + + last_log_time = time.time() + while True: + # prepare epoch + log = dict() + + # train + tmp = time.time() + optimize_metric_results, train_loss, stop_training = trainer.train(epoch + 1, train_loader, optimize_metrics) + + log['train_loss'] = train_loss + for i, metric in enumerate(optimize_metrics): + log['train_' + metric.name] = optimize_metric_results[i] + epoch_train_time += time.time() - tmp + + # evaluate + tmp = time.time() + if valid_loader is not None: + valid_metric_results = trainer.evaluate(valid_loader, val_metrics, epoch=epoch + 1) + + for i, metric in enumerate(val_metrics): + log['val_' + metric.name] = valid_metric_results[i] + val_time += time.time() - tmp + + # additional los - e.g. 
test evaluation + tmp = time.time() + for func in log_functions: + log[func.name] = func(network, epoch + 1) + log_time += time.time() - tmp + + log['epochs'] = epoch + 1 + log['model_parameters'] = model_params + log['learning_rate'] = optimizer.param_groups[0]['lr'] + + # log.update(train_loader.dataset.get_times('train_')) + # log.update(valid_loader.dataset.get_times('val_')) + + logs.append(log) + + epoch += 1 + + self.logger.debug("Epoch: " + str(epoch) + " : " + str(log)) + + if budget_type == 'epochs' and epoch + 1 >= budget: + break + + if stop_training: + break + + if tensorboard_logging and time.time() - last_log_time >= pipeline_config['tensorboard_min_log_interval']: + import tensorboard_logger as tl + worker_path = 'Train/' + tl.log_value(worker_path + 'budget', float(budget), epoch) + for name, value in log.items(): + tl.log_value(worker_path + name, float(value), epoch) + last_log_time = time.time() + + + # wrap up + wrap_up_start_time = time.time() + + self.logger.debug("Finished Training") + + opt_metric_name = 'train_' + optimize_metric.name + if valid_loader is not None: + opt_metric_name = 'val_' + optimize_metric.name + + if pipeline_config["minimize"]: + final_log = min(logs, key=lambda x:x[opt_metric_name]) + else: + final_log = max(logs, key=lambda x:x[opt_metric_name]) + + if tensorboard_logging: + import tensorboard_logger as tl + worker_path = 'Train/' + tl.log_value(worker_path + 'budget', float(budget), epoch) + for name, value in final_log.items(): + tl.log_value(worker_path + name, float(value), epoch) + + if trainer.latest_checkpoint: + final_log['checkpoint'] = trainer.latest_checkpoint + elif pipeline_config['save_checkpoints']: + path = save_checkpoint(checkpoint_path, config_id, budget, network, optimizer, lr_scheduler) + final_log['checkpoint'] = path + + final_log['train_datapoints'] = len(train_indices) + if valid_loader is not None: + final_log['val_datapoints'] = len(valid_indices) + + loss = final_log[opt_metric_name] * (1 if pipeline_config["minimize"] else -1) + + self.logger.info("Finished train with budget " + str(budget) + + "s, Training took " + str(int(wrap_up_start_time - training_start_time)) + + "s, Wrap up took " + str(int(time.time() - wrap_up_start_time)) + + "s, Init took " + str(int(start_up)) + + "s, Train took " + str(int(epoch_train_time)) + + "s, Validation took " + str(int(val_time)) + + "s, Log functions took " + str(int(log_time)) + + "s, Cumulative time " + str(int(trainer.cumulative_time)) + + "s.\nTotal time consumption in s: " + str(int(time.time() - training_start_time))) + + return {'loss': loss, 'info': final_log} + + def get_dataloader_times(self, dataloader): + read = dataloader.dataset.readTime.value() + read_avg = dataloader.dataset.readTime.avg() + augment = dataloader.dataset.augmentTime.value() + augment_avg = dataloader.dataset.augmentTime.avg() + return read, read_avg, augment, augment_avg + + @staticmethod + def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + def predict(self, pipeline_config, network, predict_loader, dataset_info, optimize_metric): + + if not torch.cuda.is_available(): + pipeline_config["cuda"] = False + else: + pipeline_config["cuda"] = True + + device = torch.device('cuda:0' if pipeline_config['cuda'] else 'cpu') + + if dataset_info.default_dataset: + metric_results = Trainer(None, network, None, None, None, None, None, device).evaluate(predict_loader, [optimize_metric]) + return { 'score': metric_results[0] } + else: + Y = 
predict(network, predict_loader, None, device) + return { 'Y': Y.detach().cpu().numpy() } + + def add_training_technique(self, name, training_technique): + if (not issubclass(training_technique, BaseTrainingTechnique)): + raise ValueError("training_technique type has to inherit from BaseTrainingTechnique") + self.training_techniques[name] = training_technique + + def remove_training_technique(self, name): + del self.training_techniques[name] + + def add_batch_loss_computation_technique(self, name, batch_loss_computation_technique): + if (not issubclass(batch_loss_computation_technique, BaseBatchLossComputationTechnique)): + raise ValueError("batch_loss_computation_technique type has to inherit from BaseBatchLossComputationTechnique, got " + str(batch_loss_computation_technique)) + self.batch_loss_computation_techniques[name] = batch_loss_computation_technique + + def remove_batch_loss_computation_technique(self, name, batch_loss_computation_technique): + del self.batch_loss_computation_techniques[name] + + def get_hyperparameter_search_space(self, **pipeline_config): + pipeline_config = self.pipeline.get_pipeline_config(**pipeline_config) + cs = ConfigSpace.ConfigurationSpace() + + hp_batch_loss_computation = cs.add_hyperparameter(CSH.CategoricalHyperparameter("batch_loss_computation_technique", list(self.batch_loss_computation_techniques.keys()))) + + for name, technique in self.batch_loss_computation_techniques.items(): + parent = {'parent': hp_batch_loss_computation, 'value': name} if hp_batch_loss_computation is not None else None + cs.add_configuration_space(prefix=name, configuration_space=technique.get_hyperparameter_search_space(**pipeline_config), + delimiter=ConfigWrapper.delimiter, parent_hyperparameter=parent) + + possible_loss_comps = sorted(set(pipeline_config["batch_loss_computation_techniques"]).intersection(self.batch_loss_computation_techniques.keys())) + + if 'batch_loss_computation_techniques' not in pipeline_config.keys(): + cs.add_hyperparameter(CSH.CategoricalHyperparameter("batch_loss_computation_technique", possible_loss_comps)) + self._check_search_space_updates() + + return cs + + def get_pipeline_config_options(self): + options = [ + ConfigOption(name="batch_loss_computation_techniques", default=list(self.batch_loss_computation_techniques.keys()), + type=str, list=True, choices=list(self.batch_loss_computation_techniques.keys())), + ConfigOption("minimize", default=self.default_minimize_value, type=to_bool, choices=[True, False]), + ConfigOption("cuda", default=True, type=to_bool, choices=[True, False]), + ConfigOption("save_checkpoints", default=False, type=to_bool, choices=[True, False]), + ConfigOption("tensorboard_min_log_interval", default=30, type=int), + ConfigOption("tensorboard_images_count", default=0, type=int), + ConfigOption("evaluate_on_train_data", default=True, type=to_bool), + ] + for name, technique in self.training_techniques.items(): + options += technique.get_pipeline_config_options() + for name, technique in self.batch_loss_computation_techniques.items(): + options += technique.get_pipeline_config_options() + return options + + +def predict(network, test_loader, metrics, device, move_network=True): + """ predict batchwise """ + # Build DataLoader + if move_network: + if torch.cuda.device_count() > 1: + network = nn.DataParallel(network) + network = network.to(device) + + # Batch prediction + network.eval() + if metrics is not None: + metric_results = [0] * len(metrics) + + N = 0.0 + for i, (X_batch, Y_batch) in enumerate(test_loader): + # 
Predict on batch + X_batch = Variable(X_batch).to(device) + batch_size = X_batch.size(0) + + Y_batch_pred = network(X_batch).detach().cpu() + + if metrics is None: + # Infer prediction shape + if i == 0: + Y_pred = Y_batch_pred + else: + # Add to prediction tensor + Y_pred = torch.cat((Y_pred, Y_batch_pred), 0) + else: + for i, metric in enumerate(metrics): + metric_results[i] += metric(Y_batch, Y_batch_pred) * batch_size + + N += batch_size + + if metrics is None: + return Y_pred + else: + return [res / N for res in metric_results] + diff --git a/autoPyTorch/pipeline/nodes/image/single_dataset.py b/autoPyTorch/pipeline/nodes/image/single_dataset.py new file mode 100644 index 000000000..6d8cd8417 --- /dev/null +++ b/autoPyTorch/pipeline/nodes/image/single_dataset.py @@ -0,0 +1,35 @@ +import logging + +from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.utils.config.config_file_parser import ConfigFileParser + +class SingleDataset(SubPipelineNode): + # Node for compatibility with MultipleDatasets model + + def __init__(self, sub_pipeline_nodes): + super(SingleDataset, self).__init__(sub_pipeline_nodes) + + self.logger = logging.getLogger('autonet') + + + def fit(self, hyperparameter_config, pipeline_config, X_train, Y_train, X_valid, Y_valid, budget, budget_type, config_id, working_directory): + return self.sub_pipeline.fit_pipeline(hyperparameter_config=hyperparameter_config, + pipeline_config=pipeline_config, + X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid, + budget=budget, budget_type=budget_type, config_id=config_id, working_directory=working_directory) + + + def predict(self, pipeline_config, X): + return self.sub_pipeline.predict_pipeline(pipeline_config=pipeline_config, X=X) + + def get_pipeline_config_options(self): + options = [ + ConfigOption('dataset_order', default=None, type=int, list=True), + + #autonet.refit sets this to false to avoid refit budget issues + ConfigOption('increase_number_of_trained_datasets', default=False, type=to_bool) + ] + return options + diff --git a/autoPyTorch/pipeline/nodes/loss_module_selector.py b/autoPyTorch/pipeline/nodes/loss_module_selector.py index 3d33c52cd..4368554db 100644 --- a/autoPyTorch/pipeline/nodes/loss_module_selector.py +++ b/autoPyTorch/pipeline/nodes/loss_module_selector.py @@ -93,4 +93,4 @@ def __call__(self, x, y): def to(self, device): result = AutoNetLossModule(self.module, self.weight_strategy, self.requires_target_class_labels) result.set_loss_function(self.function.to(device)) - return result \ No newline at end of file + return result diff --git a/autoPyTorch/pipeline/nodes/metric_selector.py b/autoPyTorch/pipeline/nodes/metric_selector.py index 31b2c3e41..3935a6d9d 100644 --- a/autoPyTorch/pipeline/nodes/metric_selector.py +++ b/autoPyTorch/pipeline/nodes/metric_selector.py @@ -6,6 +6,7 @@ from autoPyTorch.pipeline.base.pipeline_node import PipelineNode from autoPyTorch.utils.config.config_option import ConfigOption +import torch import numpy as np class MetricSelector(PipelineNode): @@ -21,6 +22,9 @@ def fit(self, pipeline_config): return {'optimize_metric': optimize_metric, 'additional_metrics': additional_metrics} + def predict(self, optimize_metric): + return { 'optimize_metric': optimize_metric } + def add_metric(self, name, metric, loss_transform=False, requires_target_class_labels=False, is_default_optimize_metric=False): """Add a metric, this metric has to be a function that takes 
to arguments y_true and y_predict @@ -70,6 +74,11 @@ def default_minimize_transform(value): def no_transform(value): return value +def ensure_numpy(y): + if type(y)==torch.Tensor: + return y.detach().cpu().numpy() + return y + def undo_ohe(y): if len(y.shape) == 1: return(y) @@ -83,6 +92,12 @@ def __init__(self, name, metric, loss_transform, ohe_transform): self.name = name def __call__(self, Y_pred, Y_true): + + Y_pred = ensure_numpy(Y_pred) + Y_true = ensure_numpy(Y_true) + + if len(Y_pred.shape) > len(Y_true.shape): + Y_pred = undo_ohe(Y_pred) return self.metric(self.ohe_transform(Y_true), self.ohe_transform(Y_pred)) def get_loss_value(self, Y_pred, Y_true): diff --git a/autoPyTorch/pipeline/nodes/train_node.py b/autoPyTorch/pipeline/nodes/train_node.py index ba7e5a80b..25735e45b 100644 --- a/autoPyTorch/pipeline/nodes/train_node.py +++ b/autoPyTorch/pipeline/nodes/train_node.py @@ -118,7 +118,7 @@ def fit(self, hyperparameter_config, pipeline_config, log = {key: value for key, value in log.items() if not isinstance(value, np.ndarray)} logger.debug("Epoch: " + str(epoch) + " : " + str(log)) if 'use_tensorboard_logger' in pipeline_config and pipeline_config['use_tensorboard_logger']: - self.tensorboard_log(budget=budget, epoch=epoch, log=log) + self.tensorboard_log(budget=budget, epoch=epoch, log=log, logdir=pipeline_config["result_logger_dir"]) if stop_training: break @@ -205,10 +205,14 @@ def get_pipeline_config_options(self): options += technique.get_pipeline_config_options() return options - def tensorboard_log(self, budget, epoch, log): + def tensorboard_log(self, budget, epoch, log, logdir): import tensorboard_logger as tl worker_path = 'Train/' - tl.log_value(worker_path + 'budget', float(budget), int(time.time())) + try: + tl.log_value(worker_path + 'budget', float(budget), int(time.time())) + except: + tl.configure(logdir) + tl.log_value(worker_path + 'budget', float(budget), int(time.time())) tl.log_value(worker_path + 'epoch', float(epoch + 1), int(time.time())) for name, value in log.items(): tl.log_value(worker_path + name, float(value), int(time.time())) diff --git a/autoPyTorch/utils/benchmarking/benchmark.py b/autoPyTorch/utils/benchmarking/benchmark.py index b2fa9e7bf..271e4ce62 100644 --- a/autoPyTorch/utils/benchmarking/benchmark.py +++ b/autoPyTorch/utils/benchmarking/benchmark.py @@ -8,6 +8,7 @@ ReadInstanceData, SaveResults, SetAutoNetConfig, + ApplyUserUpdates, SaveEnsembleLogs, SetEnsembleConfig) from autoPyTorch.utils.benchmarking.visualization_pipeline import (CollectAutoNetConfigTrajectories, @@ -46,12 +47,13 @@ def compute_ensemble_performance(self, **benchmark_config): def get_benchmark_pipeline(self): return Pipeline([ BenchmarkSettings(), - ForInstance([ #instance_file - ReadInstanceData(), #test_split, is_classification, instance + ForInstance([ # loop through instance files + ReadInstanceData(), # get test_split, is_classification, instance CreateAutoNet(), - ForAutoNetConfig([ #autonet_config_file - SetAutoNetConfig(), #use_dataset_metric, use_dataset_max_runtime - ForRun([ #num_runs, run_ids + #ApplyUserUpdates(), + ForAutoNetConfig([ # loop through autonet_config_file + SetAutoNetConfig(), # use_dataset_metric, use_dataset_max_runtime + ForRun([ # loop through num_runs, run_ids PrepareResultFolder(), FitAutoNet(), SaveResults(), diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/__init__.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/__init__.py index 972246d6f..c2975a56c 100644 --- 
a/autoPyTorch/utils/benchmarking/benchmark_pipeline/__init__.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/__init__.py @@ -9,4 +9,5 @@ from autoPyTorch.utils.benchmarking.benchmark_pipeline.save_results import SaveResults from autoPyTorch.utils.benchmarking.benchmark_pipeline.benchmark_settings import BenchmarkSettings from autoPyTorch.utils.benchmarking.benchmark_pipeline.save_ensemble_logs import SaveEnsembleLogs -from autoPyTorch.utils.benchmarking.benchmark_pipeline.set_ensemble_config import SetEnsembleConfig \ No newline at end of file +from autoPyTorch.utils.benchmarking.benchmark_pipeline.set_ensemble_config import SetEnsembleConfig +from autoPyTorch.utils.benchmarking.benchmark_pipeline.apply_user_updates import ApplyUserUpdates diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/apply_user_updates.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/apply_user_updates.py new file mode 100644 index 000000000..9fce0d4b7 --- /dev/null +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/apply_user_updates.py @@ -0,0 +1,71 @@ + +from autoPyTorch.utils.config.config_option import ConfigOption, to_bool +from autoPyTorch.pipeline.base.pipeline_node import PipelineNode +from autoPyTorch.utils.configspace_wrapper import ConfigWrapper + +import re +import os +import pandas as pd +import math +import numpy as np + + +class ApplyUserUpdates(PipelineNode): + + def fit(self, pipeline_config, autonet): + + path = pipeline_config['user_updates_config'] + if path is None: + return dict() + + if not os.path.exists(path): + raise ValueError('Invalid path: ' + path) + + data = np.array(pd.read_csv(path, header=None, sep=';')) + + for row in data: + name, value_range, is_log = row[0].strip(), self.string_to_list(str(row[1])), to_bool(row[2].strip()) + name_split = name.split(ConfigWrapper.delimiter) + autonet.pipeline[name_split[0]]._update_hyperparameter_range(ConfigWrapper.delimiter.join(name_split[1:]), value_range, is_log, check_validity=False) + + # print(autonet.get_hyperparameter_search_space()) + + return { 'autonet': autonet } + + + def get_pipeline_config_options(self): + options = [ + ConfigOption("user_updates_config", default=None, type='directory'), + ] + return options + + def string_to_list(self, string): + pattern = "\[(.*)\]" + match = re.search(pattern, string) + + if match is None: + # no list > make constant range + match = re.search(pattern, '[' + string + ',' + string + ']') + + if match is None: + raise ValueError('No valid range specified got: ' + string) + + lst = map(self.try_convert, match.group(1).split(',')) + return list(lst) + + def try_convert(self, string): + string = string.strip() + try: + return int(string) + except: + try: + return float(string) + except: + if string == 'True': + return True + if string == 'False': + return False + return string + + + diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/create_autonet.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/create_autonet.py index 456de2cc5..00dae6ede 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/create_autonet.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/create_autonet.py @@ -18,6 +18,10 @@ def fit(self, pipeline_config, data_manager): autonet_type = AutoNetMultilabel elif (data_manager.problem_type == ProblemType.FeatureClassification): autonet_type = AutoNetClassification + elif data_manager.problem_type == ProblemType.ImageClassification: + autonet = AutoNetImageClassification() + elif data_manager.problem_type == 
ProblemType.ImageClassificationMultipleDatasets: + autonet = AutoNetImageClassificationMultipleDatasets() else: raise ValueError('Problem type ' + str(data_manager.problem_type) + ' is not defined') @@ -34,4 +38,4 @@ def get_pipeline_config_options(self): options = [ ConfigOption("enable_ensemble", default=False, type=to_bool) ] - return options \ No newline at end of file + return options diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/fit_autonet.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/fit_autonet.py index b1c1c64dc..e53694975 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/fit_autonet.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/fit_autonet.py @@ -1,18 +1,136 @@ import time import logging from autoPyTorch.pipeline.base.pipeline_node import PipelineNode +from autoPyTorch.utils.config.config_option import ConfigOption +import json +import numpy as np class FitAutoNet(PipelineNode): - def fit(self, autonet, data_manager, **kwargs): + def __init__(self): + super(FitAutoNet, self).__init__() + + # if we have the required module 'resource' (not available on windows!) + self.guarantee_limits = module_exists("resource") and module_exists("pynisher") + + def fit(self, pipeline_config, autonet, data_manager, **kwargs): + start_time = time.time() + test_score = None + + if pipeline_config['refit_config'] is None: + # Start search + logging.getLogger('benchmark').debug("Fit autonet") + + # Email confirmation + if pipeline_config['confirmation_gmail_user']: + self.send_confirmation_mail(pipeline_config, autonet, data_manager) + + # Run fit + fit_result = self.fit_autonet(autonet, data_manager) + + if pipeline_config['refit_budget'] is not None: + # Refit + import os + import numpy as np + autonet_config = autonet.get_current_autonet_config() + from autoPyTorch.utils.loggers import get_refit_config + refit_config = get_refit_config(autonet_config['result_logger_dir']) + directory = os.path.join(autonet_config['result_logger_dir'], 'refit') + + autonet_config['result_logger_dir'] = directory + autonet_config['save_checkpoints'] = False + pipeline_config['refit_config'] = refit_config + + pipeline_config['refit_budget'] *= len(data_manager.X_train) + job_id = max(autonet_config['task_id'], 1) + if job_id == 1: + self.refit_autonet( + pipeline_config, autonet, autonet_config, + data_manager.X_train, data_manager.Y_train, + data_manager.X_valid, data_manager.Y_valid) - logging.getLogger('benchmark').debug("Fit autonet") + else: + # Refit + autonet_config= autonet.get_current_autonet_config() + fit_result = self.refit_autonet( + pipeline_config, autonet, autonet_config, + data_manager.X_train, data_manager.Y_train, + data_manager.X_valid, data_manager.Y_valid) - fit_result = autonet.fit( - data_manager.X_train, data_manager.Y_train, - data_manager.X_valid, data_manager.Y_valid, - refit=False) + if data_manager.X_test is not None: + # Score on test set + import numpy as np + test_score = autonet.score(data_manager.X_test, data_manager.Y_test.astype(np.int32)) return { 'fit_duration': int(time.time() - start_time), - 'fit_result': fit_result } \ No newline at end of file + 'fit_result': fit_result, + 'test_score': test_score} + + def fit_autonet(self, autonet, data_manager): + return autonet.fit( data_manager.X_train, data_manager.Y_train, + data_manager.X_valid, data_manager.Y_valid, + refit=False) + + def refit_autonet(self, pipeline_config, autonet, autonet_config, X_train, Y_train, X_valid, Y_valid): + logging.getLogger('benchmark').debug("Refit 
autonet") + + import torch + if torch.cuda.is_available(): + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + with open(pipeline_config['refit_config'], 'r') as f: + refit_config = json.load(f) + + if 'incumbent_config_path' in refit_config: + # > updates in set_autonet_config + with open(refit_config['incumbent_config_path'], 'r') as f: + config = json.load(f) + autonet_config['random_seed'] = refit_config['seed'] + autonet_config['dataset_order'] = refit_config['dataset_order'] + else: + config = refit_config + + fit_result = autonet.refit( + X_train, Y_train, + X_valid, Y_valid, + autonet_config=autonet_config, + hyperparameter_config=config, + budget=pipeline_config['refit_budget'] or autonet_config['max_budget']) + + logging.getLogger('benchmark').info("Result: " + str(fit_result)) + return fit_result + + def send_confirmation_mail(self, pipeline_config, autonet, data_manager): + user = pipeline_config['confirmation_gmail_user'] + import pprint + message = "\r\n".join(["Autonet run", + "Data:", + "%s", + "", + "Autonet Config:", + "%s" + "", + "", + "%s"]) % (pprint.pformat(data_manager.X_train.tolist()), pprint.pformat(autonet.get_current_autonet_config()), str(autonet.get_hyperparameter_search_space())) + user = user + '+benchmark@gmail.com' + from autoPyTorch.utils.mail import send_mail + send_mail(user, 'Benchmark Start', message) + + def get_pipeline_config_options(self): + options = [ + ConfigOption("refit_config", default=None, type='directory'), + ConfigOption("refit_budget", default=None, type=int), + ConfigOption("confirmation_gmail_user", default=None, type=str), + ] + return options + + +def module_exists(module_name): + try: + __import__(module_name) + except ImportError: + return False + else: + return True diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py index 43352b59b..372895ec0 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py @@ -18,6 +18,7 @@ def fit(self, pipeline_config, autonet, instance, data_manager, run_id, task_id) def get_pipeline_config_options(self): options = [ ConfigOption("autonet_configs", default=None, type='directory', list=True, required=True), + ConfigOption("autonet_config_root", default=ConfigFileParser.get_autonet_home(), type='directory'), ConfigOption("autonet_config_slice", default=None, type=str) ] return options @@ -25,9 +26,13 @@ def get_pipeline_config_options(self): @staticmethod def get_config_files(pipeline_config, parse_slice=True): config_files = pipeline_config['autonet_configs'] + if pipeline_config['autonet_config_root'] is not None: + config_files = [os.path.join(pipeline_config['autonet_config_root'], config) if not os.path.isabs(config) else config for config in config_files] + autonet_config_slice = ForAutoNetConfig.parse_slice(pipeline_config['autonet_config_slice']) if autonet_config_slice is not None and parse_slice: return config_files[autonet_config_slice] + return config_files @staticmethod @@ -48,4 +53,4 @@ def parse_slice(splice_string): start = int(split[0]) if split[0] != "" else 0 stop = int(split[1]) if split[1] != "" else None step = int(split[2]) if split[2] != "" else 1 - return slice(start, stop, step) \ No newline at end of file + return slice(start, stop, step) diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_instance.py 
b/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_instance.py index 5ee1ab697..dd04fea54 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_instance.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_instance.py @@ -21,6 +21,7 @@ def get_pipeline_config_options(self): ConfigOption("instances", default=None, type='directory', required=True), ConfigOption("instance_slice", default=None, type=str), ConfigOption("dataset_root", default=ConfigFileParser.get_autonet_home(), type='directory'), + ConfigOption("multiple_datasets_indices", default=None, type=int, list=True), ] return options @@ -30,22 +31,28 @@ def get_instances(benchmark_config, instances_must_exist=True, instance_slice=No instances = [] if os.path.isfile(benchmark_config["instances"]): with open(benchmark_config["instances"], "r") as instances_file: - for line in instances_file: - if line.strip().startswith("openml"): - instances.append(line.strip()) - continue - - if line.strip().startswith("["): - instances.append([make_path(path, benchmark_config["dataset_root"]) for path in line.strip(' []').split(',')]) - continue + if os.path.splitext(benchmark_config['instances'])[1] == '.json': + import json + datasets = [make_path(path, benchmark_config["dataset_root"]) for path in json.load(instances_file)] + instances.append(datasets if benchmark_config['multiple_datasets_indices'] is None else [datasets[i] for i in benchmark_config['multiple_datasets_indices']]) + else: + for line in instances_file: + if line.strip().startswith("openml"): + instances.append(line.strip()) + continue - instance = os.path.abspath(os.path.join(benchmark_config["dataset_root"], line.strip())) - if os.path.isfile(instance) or os.path.isdir(instance): - instances.append(instance) - else: - if not instances_must_exist: + if line.strip().startswith("["): + datasets = [make_path(path, benchmark_config["dataset_root"]) for path in line.strip(' []\n').split(',')] + instances.append(datasets if benchmark_config['multiple_datasets_indices'] is None else [datasets[i] for i in benchmark_config['multiple_datasets_indices']]) + continue + + instance = os.path.abspath(os.path.join(benchmark_config["dataset_root"], line.strip())) + if os.path.isfile(instance) or os.path.isdir(instance): instances.append(instance) - logging.getLogger('benchmark').warning(str(instance) + " does not exist") + else: + if not instances_must_exist: + instances.append(instance) + logging.getLogger('benchmark').warning(str(instance) + " does not exist") elif os.path.isdir(benchmark_config["instances"]): for root, directories, filenames in os.walk(benchmark_config["instances"]): for filename in filenames: diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_run.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_run.py index 9119c6e30..69b57e34d 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_run.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/for_run.py @@ -4,13 +4,12 @@ import traceback class ForRun(SubPipelineNode): - def fit(self, pipeline_config, autonet, data_manager, instance, autonet_config_file, run_id, task_id): + def fit(self, pipeline_config, autonet, data_manager, instance, run_id, task_id): for run_number in self.parse_range(pipeline_config['run_number_range'], pipeline_config['num_runs']): try: logging.getLogger('benchmark').info("Start run " + str(run_id) + "_" + str(run_number)) self.sub_pipeline.fit_pipeline(pipeline_config=pipeline_config, - data_manager=data_manager, instance=instance, - 
autonet_config_file=autonet_config_file, autonet=autonet, + data_manager=data_manager, instance=instance, run_number=run_number, run_id=run_id, task_id=task_id) except Exception as e: print(e) @@ -42,4 +41,4 @@ def parse_range(range_string, fallback): start = int(split[0]) if split[0] != "" else 0 stop = int(split[1]) if split[1] != "" else fallback step = int(split[2]) if split[2] != "" else 1 - return range(start, stop, step) \ No newline at end of file + return range(start, stop, step) diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/prepare_result_folder.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/prepare_result_folder.py index 5391b4cc7..27a9ee627 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/prepare_result_folder.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/prepare_result_folder.py @@ -1,18 +1,19 @@ import os import logging -from ConfigSpace.read_and_write import json +from ConfigSpace.read_and_write import json as cs_json, pcs_new as cs_pcs from autoPyTorch.utils.config.config_option import ConfigOption, to_bool from autoPyTorch.pipeline.base.pipeline_node import PipelineNode from autoPyTorch.utils.hyperparameter_search_space_update import HyperparameterSearchSpaceUpdates +from autoPyTorch.utils.modify_config_space import remove_constant_hyperparameter class PrepareResultFolder(PipelineNode): - def fit(self, pipeline_config, data_manager, instance, - autonet_config_file, autonet, run_number, run_id, task_id): + def fit(self, pipeline_config, data_manager, instance, + autonet, run_number, run_id, task_id): - instance_name, autonet_config_name, run_name = get_names(instance, autonet_config_file, run_id, run_number) - run_result_dir = get_run_result_dir(pipeline_config, instance, autonet_config_file, run_id, run_number) - instance_run_id = str(run_name) + "_" + str(instance_name) + "_" + str(autonet_config_name) + instance_name, run_name = get_names(instance, run_id, run_number) + run_result_dir = get_run_result_dir(pipeline_config, instance, run_id, run_number, autonet) + instance_run_id = str(run_name) + "-" + str(instance_name) instance_run_id = '_'.join(instance_run_id.split(':')) autonet.autonet_config = None #clean results of last fit @@ -37,16 +38,38 @@ def fit(self, pipeline_config, data_manager, instance, instance_info['instance_shape'] = data_manager.X_train.shape instance_info['categorical_features'] = data_manager.categorical_features - if autonet.get_current_autonet_config()["hyperparameter_search_space_updates"] is not None: - autonet.get_current_autonet_config()["hyperparameter_search_space_updates"].save_as_file( + autonet_config = autonet.get_current_autonet_config() + if autonet_config["hyperparameter_search_space_updates"] is not None: + autonet_config["hyperparameter_search_space_updates"].save_as_file( os.path.join(run_result_dir, "hyperparameter_search_space_updates.txt")) + if 'user_updates_config' in pipeline_config: + user_updates_config = pipeline_config['user_updates_config'] + if user_updates_config: + from shutil import copyfile + copyfile(user_updates_config, os.path.join(run_result_dir, 'user_updates_config.csv')) + self.write_config_to_file(run_result_dir, "instance.info", instance_info) self.write_config_to_file(run_result_dir, "benchmark.config", pipeline_config) - self.write_config_to_file(run_result_dir, "autonet.config", autonet.get_current_autonet_config()) - + self.write_config_to_file(run_result_dir, "autonet.config", autonet_config) + + # save refit config - add indent and sort keys + if 
'refit_config' in pipeline_config and pipeline_config['refit_config'] is not None: + import json + with open(pipeline_config['refit_config'], 'r') as f: + refit_config = json.loads(f.read()) + with open(os.path.join(run_result_dir, 'refit_config.json'), 'w+') as f: + f.write(json.dumps(refit_config, indent=4, sort_keys=True)) + + # save search space + search_space = autonet.pipeline.get_hyperparameter_search_space(**autonet_config) with open(os.path.join(run_result_dir, "configspace.json"), "w") as f: - f.write(json.write(autonet.pipeline.get_hyperparameter_search_space(**autonet.get_current_autonet_config()))) + f.write(cs_json.write(search_space)) + + # save search space without constants - used by bohb - as pcs (simple) + simplified_search_space, _ = remove_constant_hyperparameter(search_space) + with open(os.path.join(run_result_dir, "configspace_simple.pcs"), "w") as f: + f.write(cs_pcs.write(simplified_search_space)) return { 'result_dir': run_result_dir } @@ -54,30 +77,41 @@ def fit(self, pipeline_config, data_manager, instance, def write_config_to_file(self, folder, filename, config): do_not_write = ["hyperparameter_search_space_updates"] with open(os.path.join(folder, filename), "w") as f: - f.write("\n".join([(key + '=' + str(value)) for key, value in config.items() if not key in do_not_write])) + f.write("\n".join([(key + '=' + str(value)) for (key, value) in sorted(config.items(), key=lambda x: x[0]) if not key in do_not_write])) def get_pipeline_config_options(self): options = [ - ConfigOption('result_dir', default=None, type='directory', required=True) + ConfigOption('result_dir', default=None, type='directory', required=True), + ConfigOption('name', default=None, type=str, required=True) ] return options -def get_names(instance, autonet_config_file, run_id, run_number): +def get_names(instance, run_id, run_number): if isinstance(instance, list): - instance_name = "_".join([os.path.split(p)[1].split(".")[0] for p in instance]) + for p in instance: + if not os.path.exists(p): + raise Exception('Invalid path: ' + str(p)) + instance_name = "-".join(sorted([os.path.split(p)[1].split(".")[0] for p in instance])) + if len(instance_name) > 40: + instance_name = "-".join(sorted([os.path.split(q)[1] for q in sorted(set(os.path.split(p)[0] for p in instance))] + [str(len(instance))])) else: instance_name = os.path.basename(instance).split(".")[0] - autonet_config_name = os.path.basename(autonet_config_file).split(".")[0] run_name = "run_" + str(run_id) + "_" + str(run_number) - return "_".join(instance_name.split(':')), autonet_config_name, run_name + return "_".join(instance_name.split(':')), run_name + +def get_run_result_dir(pipeline_config, instance, run_id, run_number, autonet): + instance_name, run_name = get_names(instance, run_id, run_number) + autonet_config = autonet.get_current_autonet_config() + benchmark_name = '_'.join(pipeline_config['name'].split(' ')) + + if 'refit_config' not in pipeline_config or pipeline_config['refit_config'] is None: + benchmark_name += "[{0}_{1}]".format(int(autonet_config['min_budget']), int(autonet_config['max_budget'])) + elif 'refit_budget' not in pipeline_config or pipeline_config['refit_budget'] is None: + benchmark_name += "[refit_{0}]".format(int(autonet_config['max_budget'])) + else: + benchmark_name += "[refit_{0}]".format(int(pipeline_config['refit_budget'])) -def get_run_result_dir(pipeline_config, instance, autonet_config_file, run_id, run_number): - instance_name, autonet_config_name, run_name = get_names(instance, 
autonet_config_file, run_id, run_number) - run_result_dir = os.path.join(pipeline_config['result_dir'], - pipeline_config["benchmark_name"], - instance_name, - autonet_config_name, - run_name) - return run_result_dir \ No newline at end of file + run_result_dir = os.path.join(pipeline_config['result_dir'], instance_name, benchmark_name, run_name) + return run_result_dir diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/read_instance_data.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/read_instance_data.py index f1f2734fe..3a74c3fce 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/read_instance_data.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/read_instance_data.py @@ -1,22 +1,45 @@ - +import numpy as np from autoPyTorch.utils.config.config_option import ConfigOption, to_bool from autoPyTorch.pipeline.base.pipeline_node import PipelineNode -from autoPyTorch.data_management.data_manager import DataManager +from autoPyTorch.data_management.data_manager import DataManager, ImageManager class ReadInstanceData(PipelineNode): def fit(self, pipeline_config, instance): - assert pipeline_config['problem_type'] in ['feature_classification', 'feature_multilabel', 'feature_regression'] - dm = DataManager(verbose=pipeline_config["data_manager_verbose"]) - dm.read_data(instance, - is_classification=(pipeline_config["problem_type"] in ['feature_classification', 'feature_multilabel']), - test_split=pipeline_config["test_split"]) + # Get data manager for train, val, test data + if pipeline_config['problem_type'] in ['feature_classification', 'feature_multilabel', 'feature_regression']: + dm = DataManager(verbose=pipeline_config["data_manager_verbose"]) + if pipeline_config['test_instances'] is not None: + dm_test = DataManager(verbose=pipeline_config["data_manager_verbose"]) + else: + dm = ImageManager(verbose=pipeline_config["data_manager_verbose"]) + if pipeline_config['test_instances'] is not None: + dm_test = ImageManager(verbose=pipeline_config["data_manager_verbose"]) + + # Read data + if pipeline_config['test_instances'] is not None: + # Use given test set + dm.read_data(instance, + is_classification=(pipeline_config["problem_type"] in ['feature_classification', 'feature_multilabel', 'image_classification']), + test_split=0.0) + dm_test.read_data(pipeline_config['test_instances'], + is_classification=(pipeline_config["problem_type"] in ['feature_classification', 'feature_multilabel', 'image_classification']), + test_split=0.0) + dm.X_test, dm.Y_test = dm_test.X_train, dm_test.Y_train.astype(np.int32) + + else: + # Use test split + dm.read_data(instance, + is_classification=(pipeline_config["problem_type"] in ['feature_classification', 'feature_multilabel', 'image_classification']), + test_split=pipeline_config["test_split"]) + return {"data_manager": dm} def get_pipeline_config_options(self): options = [ ConfigOption("test_split", default=0.0, type=float), - ConfigOption("problem_type", default='feature_classification', type=str, choices=['feature_classification', 'feature_multilabel', 'feature_regression']), + ConfigOption("problem_type", default='feature_classification', type=str, choices=['feature_classification', 'feature_multilabel', 'feature_regression', 'image_classification']), ConfigOption("data_manager_verbose", default=False, type=to_bool), + ConfigOption("test_instances", default=None, type=str) ] - return options \ No newline at end of file + return options diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/save_results.py 
b/autoPyTorch/utils/benchmarking/benchmark_pipeline/save_results.py index 65e31ad56..7891e80b5 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/save_results.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/save_results.py @@ -8,27 +8,27 @@ class SaveResults(PipelineNode): - def fit(self, result_dir, fit_duration, fit_result, autonet, task_id): + def fit(self, result_dir, fit_duration, test_score, fit_result, autonet, task_id): if (task_id not in [-1, 1]): time.sleep(60) return dict() logging.getLogger('benchmark').info("Create and save summary") - summary ={ - "optimized_hyperparameter_config": fit_result["optimized_hyperparameter_config"], + summary = { + "incumbent_config": fit_result["optimized_hyperparameter_config"], "budget": fit_result["budget"], "loss": fit_result["loss"], + "test_score": test_score, "info": fit_result["info"], "duration": fit_duration, } - + if "ensemble_configs" in fit_result: summary["ensemble_configs"] = list(fit_result["ensemble_configs"].values()) - # write as json with open(os.path.join(result_dir, "summary.json"), "w") as f: json.dump(summary, f) - return dict() \ No newline at end of file + return dict() diff --git a/autoPyTorch/utils/benchmarking/benchmark_pipeline/set_autonet_config.py b/autoPyTorch/utils/benchmarking/benchmark_pipeline/set_autonet_config.py index 47f99b351..b283bf613 100644 --- a/autoPyTorch/utils/benchmarking/benchmark_pipeline/set_autonet_config.py +++ b/autoPyTorch/utils/benchmarking/benchmark_pipeline/set_autonet_config.py @@ -27,7 +27,6 @@ def fit(self, pipeline_config, autonet, autonet_config_file, data_manager, insta if data_manager.categorical_features: config['categorical_features'] = data_manager.categorical_features - config['dataset_name'] = "_".join(os.path.basename(instance).split(":")) # Note: PrepareResultFolder will make a small run dependent update of the autonet_config autonet.update_autonet_config(**config) return dict() @@ -39,4 +38,4 @@ def get_pipeline_config_options(self): ConfigOption("working_dir", default=None, type='directory'), ConfigOption("network_interface_name", default=None, type=str) ] - return options \ No newline at end of file + return options diff --git a/autoPyTorch/utils/config/config_file_parser.py b/autoPyTorch/utils/config/config_file_parser.py index 0c685b224..e7d0acb59 100644 --- a/autoPyTorch/utils/config/config_file_parser.py +++ b/autoPyTorch/utils/config/config_file_parser.py @@ -39,7 +39,7 @@ def read_key_values_from_file(filename, delimiter='='): key_values[key] = value return key_values - def read(self, filename, key_values_dict=None): + def read(self, filename, key_values_dict=None, silent=False): """ Read a config file.
@@ -58,6 +58,8 @@ def read(self, filename, key_values_dict=None): # open the config file for key, value in key_values_dict.items(): if (key not in self.config_options): + if silent: + continue raise ValueError("Config key '" + key + "' is not a valid autonet config option") option = self.config_options[key] @@ -204,4 +206,4 @@ def get_autonet_home(): """ Get the home directory of autonet """ if "AUTONET_HOME" in os.environ: return os.environ["AUTONET_HOME"] - return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) \ No newline at end of file + return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) diff --git a/autoPyTorch/utils/config_space_hyperparameter.py b/autoPyTorch/utils/config_space_hyperparameter.py index 218b74aa0..e3e0b115b 100644 --- a/autoPyTorch/utils/config_space_hyperparameter.py +++ b/autoPyTorch/utils/config_space_hyperparameter.py @@ -2,8 +2,7 @@ import ConfigSpace.hyperparameters as CSH -def get_hyperparameter(hyper_type, name, value_range): - log = False +def get_hyperparameter(hyper_type, name, value_range, log = False): if isinstance(value_range, tuple) and len(value_range) == 2 and isinstance(value_range[1], bool) and \ isinstance(value_range[0], (tuple, list)): value_range, log = value_range @@ -24,5 +23,5 @@ def get_hyperparameter(hyper_type, name, value_range): return CSH.UniformIntegerHyperparameter(name, lower=value_range[0], upper=value_range[1], log=log) raise ValueError('Unknown type: %s for hp %s' % (hyper_type, name) ) -def add_hyperparameter(cs, hyper_type, name, value_range): - return cs.add_hyperparameter(get_hyperparameter(hyper_type, name, value_range)) \ No newline at end of file +def add_hyperparameter(cs, hyper_type, name, value_range, log=False): + return cs.add_hyperparameter(get_hyperparameter(hyper_type, name, value_range, log)) diff --git a/autoPyTorch/utils/configspace_wrapper.py b/autoPyTorch/utils/configspace_wrapper.py index e09e73327..fc8561f2a 100644 --- a/autoPyTorch/utils/configspace_wrapper.py +++ b/autoPyTorch/utils/configspace_wrapper.py @@ -24,6 +24,12 @@ def __getitem__(self, key): pprint.pprint(self.config) return self.config[self.config_prefix + key] + def __iter__(self): + for k in self.config.__iter__(): + if not k.startswith(self.config_prefix): + continue + yield k[len(self.config_prefix):] + def __str__(self): return str(self.config) diff --git a/autoPyTorch/utils/hyperparameter_search_space_update.py b/autoPyTorch/utils/hyperparameter_search_space_update.py index bfceaf0a2..52d670945 100644 --- a/autoPyTorch/utils/hyperparameter_search_space_update.py +++ b/autoPyTorch/utils/hyperparameter_search_space_update.py @@ -1,6 +1,7 @@ import ast import os + class HyperparameterSearchSpaceUpdate(): def __init__(self, node_name, hyperparameter, value_range, log=False): self.node_name = node_name diff --git a/autoPyTorch/utils/loggers.py b/autoPyTorch/utils/loggers.py new file mode 100644 index 000000000..b4ed1e65f --- /dev/null +++ b/autoPyTorch/utils/loggers.py @@ -0,0 +1,219 @@ +import time, os, shutil +from hpbandster.core.result import json_result_logger + +class bohb_logger(json_result_logger): + def __init__(self, constant_hyperparameter, directory, overwrite=False): + super(bohb_logger, self).__init__(directory, overwrite) + self.constants = constant_hyperparameter + + + def new_config(self, config_id, config, config_info): + import json + if not config_id in self.config_ids: + self.config_ids.add(config_id) + + full_config = dict() + full_config.update(self.constants) + 
full_config.update(config) + + with open(self.config_fn, 'a') as fh: + fh.write(json.dumps([config_id, full_config, config_info])) + fh.write('\n') + + +class tensorboard_logger(object): + def __init__(self, pipeline_config, constant_hyperparameter, global_results_dir): + self.start_time = time.time() + + b = pipeline_config['max_budget'] + budgets = [] + while b >= pipeline_config['min_budget']: + budgets.append(int(b)) + b /= pipeline_config['eta'] + + self.incumbent_results = {b: 0 for b in budgets} + self.mean_results = {b: [0, 0] for b in budgets} + + self.constants = constant_hyperparameter + self.results_logged = 0 + self.seed = pipeline_config['random_seed'] + self.max_budget = pipeline_config['max_budget'] + self.global_results_dir = global_results_dir + + self.keep_only_incumbent_checkpoints = pipeline_config['keep_only_incumbent_checkpoints'] + + self.incumbent_configs_dir = os.path.join(pipeline_config['result_logger_dir'], 'incumbents') + self.status_dir = pipeline_config['result_logger_dir'] + self.run_name = '-'.join(pipeline_config['run_id'].split('-')[1:]) + os.makedirs(self.incumbent_configs_dir, exist_ok=True) + + + def new_config(self, config_id, config, config_info): + pass + + def __call__(self, job): + import json + import tensorboard_logger as tl + + id = job.id + budget = int(job.kwargs['budget']) + config = job.kwargs['config'] + # timestamps = job.timestamps + result = job.result + # exception = job.exception + + if result is None: + return + + self.results_logged += 1 + + tl.log_value('BOHB/all_results', result['loss'] * -1, self.results_logged) + + if budget not in self.incumbent_results or result['loss'] < self.incumbent_results[budget]: + self.incumbent_results[budget] = result['loss'] + + full_config = dict() + full_config.update(self.constants) + full_config.update(config) + + refit_config = dict() + refit_config['budget'] = budget + refit_config['seed'] = self.seed + + refit_config['incumbent_config_path'] = os.path.join(self.incumbent_configs_dir, 'config_' + str(budget) + '.json') + with open(refit_config['incumbent_config_path'], 'w+') as f: + f.write(json.dumps(full_config, indent=4, sort_keys=True)) + + with open(os.path.join(self.incumbent_configs_dir, 'result_' + str(budget) + '.json'), 'w+') as f: + f.write(json.dumps([job.id, job.kwargs['budget'], job.timestamps, job.result, job.exception], indent=4, sort_keys=True)) + + checkpoints, refit_config['dataset_order'] = get_checkpoints(result['info']) or ([],None) + refit_config['incumbent_checkpoint_paths'] = [] + for i, checkpoint in enumerate(checkpoints): + dest = os.path.join(self.incumbent_configs_dir, 'checkpoint_' + str(budget) + '_' + str(i) + '.pt' if len(checkpoints) > 1 else 'checkpoint_' + str(budget) + '.pt') + if os.path.exists(dest): + os.remove(dest) + if self.keep_only_incumbent_checkpoints: + shutil.move(checkpoint, dest) + else: + shutil.copy(checkpoint, dest) + refit_config['incumbent_checkpoint_paths'].append(dest) + + refit_path = os.path.join(self.incumbent_configs_dir, 'refit_config_' + str(budget) + '.json') + with open(refit_path, 'w+') as f: + f.write(json.dumps(refit_config, indent=4, sort_keys=True)) + + if budget >= self.max_budget and self.global_results_dir is not None: + import autoPyTorch.utils.thread_read_write as thread_read_write + import datetime + + dataset_names = sorted([os.path.splitext(os.path.split(info['dataset_path'])[1])[0] for info in result['info']]) + suffix = '' + if len(result['info']) > 1: + suffix += '+[' + ', '.join(dataset_names) + ']' + if 
budget > self.max_budget: + suffix += '+Refit' + + for info in result['info']: + thread_read_write.update_results(self.global_results_dir, { + 'name': os.path.splitext(os.path.split(info['dataset_path'])[1])[0] + suffix, + 'result': round(info['val_top1'], 2), + 'seed': self.seed, + 'refit_config': refit_path, + 'text': "{0}/{1} -- {2}".format( + round(info['val_datapoints'] * (info['val_top1'] / 100)), + info['val_datapoints'], + round(budget / len(result['info']))) + }) + + if self.keep_only_incumbent_checkpoints and get_checkpoints(result['info']): + for checkpoint in get_checkpoints(result['info'])[0]: + if os.path.exists(checkpoint): + os.remove(checkpoint) + + if budget not in self.mean_results: + self.mean_results[budget] = [result['loss'], 1] + else: + self.mean_results[budget][0] += result['loss'] + self.mean_results[budget][1] += 1 + + for b, loss in self.incumbent_results.items(): + tl.log_value('BOHB/incumbent_results_' + str(b), loss * -1, self.mean_results[b][1]) + + for b, (loss, n) in self.mean_results.items(): + tl.log_value('BOHB/mean_results_' + str(b), loss * -1 / n if n > 0 else 0, n) + + status = dict() + for b, loss in self.incumbent_results.items(): + budget_status = dict() + budget_status['incumbent'] = loss * -1 + mean_res = self.mean_results[b] + budget_status['mean'] = mean_res[0] / mean_res[1] * -1 if mean_res[1] > 0 else 0 + budget_status['configs'] = mean_res[1] + status['budget: ' + str(b)] = budget_status + + import datetime + status["runtime"] = str(datetime.timedelta(seconds=time.time() - self.start_time)) + + with open(os.path.join(self.status_dir, 'bohb_status.json'), 'w+') as f: + f.write(json.dumps(status, indent=4, sort_keys=True)) + + +def get_checkpoints(info): + if not isinstance(info, list): + if 'checkpoint' in info: + return [info['checkpoint']] + return [] + + checkpoints = [] + dataset_order = [] + for subinfo in info: + if 'checkpoint' in subinfo: + checkpoints.append(subinfo['checkpoint']) + dataset_order.append(subinfo['dataset_id']) + return checkpoints, dataset_order + +class combined_logger(object): + def __init__(self, *loggers): + self.loggers = loggers + + def new_config(self, config_id, config, config_info): + for logger in self.loggers: + logger.new_config(config_id, config, config_info) + + def __call__(self, job): + for logger in self.loggers: + logger(job) + +def get_incumbents(directory): + + incumbents = os.path.join(directory, 'incumbents') + + if not os.path.exists(incumbents): + return None + + import re + file_re = [ + re.compile('config_([0-9]+).json'), + re.compile('refit_config_([0-9]+).json'), + re.compile('result_([0-9]+).json'), + re.compile('checkpoint_([0-9]+).*.pt'), + ] + + incumbent_files = [[] for _ in range(len(file_re))] + for filename in sorted(os.listdir(incumbents)): + for i, reg in enumerate(file_re): + match = reg.match(filename) + + if match: + budget = int(match.group(1)) + inc_file = os.path.join(incumbents, filename) + incumbent_files[i].append([budget, inc_file]) + + return incumbent_files + + +def get_refit_config(directory): + _, refit_configs, _, _ = get_incumbents(directory) + refit_config = max(refit_configs, key=lambda x: x[0]) #get config of max budget + return refit_config[1] diff --git a/autoPyTorch/utils/modify_config_space.py b/autoPyTorch/utils/modify_config_space.py new file mode 100644 index 000000000..a12335bc5 --- /dev/null +++ b/autoPyTorch/utils/modify_config_space.py @@ -0,0 +1,242 @@ +import ConfigSpace as CS +import ConfigSpace.hyperparameters as CSH +import copy + +def 
remove_constant_hyperparameter(cs): + constants = dict() + + hyperparameter_to_add = [] + for hyper in cs.get_hyperparameters(): + const, value = is_constant(hyper) + if const: + constants[hyper.name] = value + else: + hyperparameter_to_add.append(copy.copy(hyper)) + + for name in constants: + truncate_hyperparameter(cs, cs.get_hyperparameter(name)) + + cs._hyperparameter_idx = dict() + cs._idx_to_hyperparameter = dict() + cs._sort_hyperparameters() + cs._update_cache() + + return cs, constants + + +def is_constant(hyper): + if isinstance(hyper, CSH.Constant): + return True, hyper.value + + elif isinstance(hyper, CSH.UniformFloatHyperparameter) or isinstance(hyper, CSH.UniformIntegerHyperparameter): + if abs(hyper.upper - hyper.lower) < 1e-10: + return True, hyper.lower + + elif isinstance(hyper, CSH.CategoricalHyperparameter): + if len(hyper.choices) == 1: + return True, hyper.choices[0] + + return False, None + + +def override_hyperparameter(config_space, hyper): + import ConfigSpace.conditions as CSC + + for condition in config_space._children[hyper.name].values(): + subconditions = condition.components if isinstance(condition, CSC.AbstractConjunction) else [condition] + for subcondition in subconditions: + if subcondition.parent.name == hyper.name: + subcondition.parent = hyper + + for condition in config_space._parents[hyper.name].values(): + if condition is None: + continue # root + subconditions = condition.components if isinstance(condition, CSC.AbstractConjunction) else [condition] + for subcondition in subconditions: + if subcondition.child.name == hyper.name: + subcondition.child = hyper + + config_space._hyperparameters[hyper.name] = hyper + + +def update_conditions(config_space, parent): + import ConfigSpace.conditions as CSC + + if parent.name not in config_space._hyperparameters: + # already removed -> all condition already updated + return + + possible_values, is_value_range = get_hyperparameter_values(parent) + children = [config_space.get_hyperparameter(name) for name in config_space._children[parent.name]] + + for child in children: + if child.name not in config_space._children[parent.name]: + # already cut + continue + condition = config_space._children[parent.name][child.name] + + if isinstance(condition, CSC.AbstractConjunction): + is_and = isinstance(condition, CSC.AndConjunction) + state = 2 + + new_subconditions = [] + for subcondition in condition.components: + if subcondition.parent.name != parent.name: + new_subconditions.append(subcondition) + continue + substate = get_condition_state(subcondition, possible_values, is_value_range) + if substate == 0 and is_and and state == 2: + state = 0 + + if substate == 1 and not is_and and state == 2: + state = 1 + + if substate == 2: + new_subconditions.append(subcondition) + + else: + # condition is not relevant anymore + del config_space._children[parent.name][child.name] + del config_space._parents[child.name][parent.name] + for grand_parent, cond in config_space._parents[parent.name].items(): + if cond is None: + continue + cond_type = type(cond) + values, _ = get_hyperparameter_values(cond.parent) + # fake parent value first as it might be invalid atm and gets truncated later + new_condition = cond_type(child, cond.parent, values[0]) + new_condition.value = cond.value + config_space._children[grand_parent][child.name] = new_condition + config_space._parents[child.name][grand_parent] = new_condition + + if len(new_subconditions) == 0: + state = 1 if is_and else 0 # either everything was false or true + + if state == 2: 
+ + if len(new_subconditions) == 1: + condition = new_subconditions[0] + config_space._children[condition.parent.name][child.name] = new_subconditions[0] + config_space._parents[child.name][condition.parent.name] = new_subconditions[0] + else: + condition.__init__(*new_subconditions) + + for subcondition in new_subconditions: + config_space._children[subcondition.parent.name][child.name] = condition + config_space._parents[child.name][subcondition.parent.name] = condition + + else: + state = get_condition_state(condition, possible_values, is_value_range) + + if state == 1: + del config_space._children[parent.name][child.name] + del config_space._parents[child.name][parent.name] + + for grand_parent, cond in config_space._parents[parent.name].items(): + if cond is None: + continue + cond_type = type(cond) + values, _ = get_hyperparameter_values(cond.parent) + # fake parent value first as it might be invalid atm and gets truncated later + new_condition = cond_type(child, cond.parent, values[0]) + new_condition.value = cond.value + config_space._children[grand_parent][child.name] = new_condition + config_space._parents[child.name][grand_parent] = new_condition + + if len(config_space._parents[child.name]) == 0: + config_space._conditionals.remove(child.name) + if state == 0: + truncate_hyperparameter(config_space, child) + + + + +def truncate_hyperparameter(config_space, hyper): + if hyper.name not in config_space._hyperparameters: + return + + parent_names = list(config_space._parents[hyper.name].keys()) + for parent_name in parent_names: + del config_space._children[parent_name][hyper.name] + + del config_space._parents[hyper.name] + del config_space._hyperparameters[hyper.name] + + if hyper.name in config_space._conditionals: + config_space._conditionals.remove(hyper.name) + + child_names = list(config_space._children[hyper.name].keys()) + for child_name in child_names: + truncate_hyperparameter(config_space, config_space.get_hyperparameter(child_name)) + + +def get_condition_state(condition, possible_values, is_range): + """ + 0: always false + 1: always true + 2: true or false + """ + import ConfigSpace.conditions as CSC + + c_val = condition.value + if isinstance(condition, CSC.EqualsCondition): + if is_range: + if approx(possible_values[0], possible_values[1]): + return 1 if approx(possible_values[0], c_val) else 0 + return 2 if c_val >= possible_values[0] and c_val <= possible_values[1] else 0 + else: + if len(possible_values) == 1: + return 1 if c_val == possible_values[0] else 0 + return 2 if c_val in possible_values else 0 + + if isinstance(condition, CSC.NotEqualsCondition): + if is_range: + if approx(possible_values[0], possible_values[1]): + return 0 if approx(possible_values[0], c_val) else 1 + return 2 if c_val >= possible_values[0] and c_val <= possible_values[1] else 1 + else: + if len(possible_values) == 1: + return 0 if c_val == possible_values[0] else 1 + return 2 if c_val in possible_values else 1 + + if isinstance(condition, CSC.GreaterThanCondition): # is_range has to be true + if c_val < possible_values[0]: + return 1 + if c_val >= possible_values[1]: + return 0 + return 2 + + if isinstance(condition, CSC.LessThanCondition): # is_range has to be true + if c_val <= possible_values[0]: + return 0 + if c_val > possible_values[1]: + return 1 + return 2 + + if isinstance(condition, CSC.InCondition): + inter = set(possible_values).intersection(set(c_val)) + if len(inter) == len(possible_values): + return 1 + if len(inter) == 0: + return 0 + return 2 + + +def approx(x, y): + 
return abs(x - y) < 1e-10 + +def get_hyperparameter_values(hyper): + """Returns list[choices/range] and bool[is value range] + """ + import ConfigSpace.hyperparameters as CSH + + if isinstance(hyper, CSH.CategoricalHyperparameter): + return hyper.choices, False + + if isinstance(hyper, CSH.NumericalHyperparameter): + return [hyper.lower, hyper.upper], True + + if isinstance(hyper, CSH.Constant): + return [hyper.value, hyper.value], True + + raise ValueError(str(type(hyper)) + ' is not supported') diff --git a/autoPyTorch/utils/thread_read_write.py b/autoPyTorch/utils/thread_read_write.py new file mode 100644 index 000000000..75d75f874 --- /dev/null +++ b/autoPyTorch/utils/thread_read_write.py @@ -0,0 +1,42 @@ + +import fasteners, json, os, threading + +thread_lock = threading.Lock() + +def write(filename, content): + with open(filename, 'w+') as f: + f.write(content) + +def read(filename): + content = '{}' + if os.path.exists(filename): + with open(filename, 'r') as f: + content = f.read() + return content + +def append(filename, content): + with fasteners.InterProcessLock('{0}.lock'.format(filename)): + with open(filename, 'a+') as f: + f.write(content) + +def update_results_thread(filename, info): + thread_lock.acquire() + with fasteners.InterProcessLock('{0}.lock'.format(filename)): + content = json.loads(read(filename)) + name = info['name'] + result = info['result'] + refit_config = info['refit_config'] + text = info['text'] + seed = str(info['seed']) + + infos = content[name] if name in content else dict() + infos[seed] = {'result': result, 'description': text, 'refit': refit_config} + content[name] = infos + + write(filename, json.dumps(content, indent=4, sort_keys=True)) + thread_lock.release() + + +def update_results(filename, info): + thread = threading.Thread(target = update_results_thread, args = (filename, info)) + thread.start() \ No newline at end of file diff --git a/configs/autonet/automl/cifar_example.txt b/configs/autonet/automl/cifar_example.txt new file mode 100644 index 000000000..39121280b --- /dev/null +++ b/configs/autonet/automl/cifar_example.txt @@ -0,0 +1,15 @@ +max_budget=1500 +min_budget=300 +min_workers=2 +max_runtime=3600 +budget_type=time +default_dataset_download_dir=./datasets/ +images_root_folders=./datasets/ +optimize_metric=accuracy +validation_split=0.1 +use_tensorboard_logger=True +networks=['resnet'] +lr_scheduler=['cosine_annealing'] +batch_loss_computation_techniques=['mixup'] +loss_modules=['cross_entropy'] +optimizer=['adamw'] diff --git a/configs/benchmark/cifar_example.txt b/configs/benchmark/cifar_example.txt new file mode 100644 index 000000000..2fc4d8f74 --- /dev/null +++ b/configs/benchmark/cifar_example.txt @@ -0,0 +1,7 @@ +result_dir=benchmark_results +instances=configs/datasets/cifar.txt +autonet_configs=[configs/autonet/automl/cifar_example.txt] +problem_type=image_classification +log_level=info +test_split=0.1 +num_runs=1 diff --git a/configs/datasets/cifar.txt b/configs/datasets/cifar.txt new file mode 100644 index 000000000..d9099be33 --- /dev/null +++ b/configs/datasets/cifar.txt @@ -0,0 +1 @@ +[datasets/CIFAR10.csv] diff --git a/configs/datasets/openml_image.txt b/configs/datasets/openml_image.txt new file mode 100644 index 000000000..086605861 --- /dev/null +++ b/configs/datasets/openml_image.txt @@ -0,0 +1 @@ +openml:40927:3 \ No newline at end of file diff --git a/configs/refit/refit_example.json b/configs/refit/refit_example.json new file mode 100644 index 000000000..3f9b4f8bc --- /dev/null +++ 
b/configs/refit/refit_example.json @@ -0,0 +1,19 @@ +{ + "SimpleTrainNode:batch_loss_computation_technique": "mixup", + "SimpleTrainNode:mixup:alpha": 0.012, + "CreateImageDataLoader:batch_size": 147, + "NetworkSelectorDatasetInfo:network": "efficientnetb0", + "OptimizerSelector:optimizer": "adamw", + "OptimizerSelector:adamw:learning_rate": 0.012, + "OptimizerSelector:adamw:weight_decay": 0.000017, + "SimpleLearningrateSchedulerSelector:lr_scheduler": "cosine_annealing", + "SimpleLearningrateSchedulerSelector:cosine_annealing:T_max": 73, + "SimpleLearningrateSchedulerSelector:cosine_annealing:T_mult": 1.38, + "ImageAugmentation:augment": "True", + "ImageAugmentation:cutout": "True", + "ImageAugmentation:cutout_holes": 3, + "ImageAugmentation:autoaugment": "True", + "ImageAugmentation:fastautoaugment": "False", + "ImageAugmentation:length": 6, + "LossModuleSelectorIndices:loss_module": "cross_entropy" +} diff --git a/datasets/CIFAR10.csv b/datasets/CIFAR10.csv new file mode 100644 index 000000000..04ab00f08 --- /dev/null +++ b/datasets/CIFAR10.csv @@ -0,0 +1 @@ +CIFAR10, 0 \ No newline at end of file diff --git a/datasets/example.csv b/datasets/example.csv new file mode 100644 index 000000000..529464e61 --- /dev/null +++ b/datasets/example.csv @@ -0,0 +1,99 @@ +icebreaker_s_001689.png,8 +peke_s_000545.png,5 +convertible_s_000520.png,1 +domestic_dog_s_000455.png,5 +broodmare_s_000313.png,7 +capreolus_capreolus_s_001380.png,4 +true_cat_s_000886.png,3 +cruiser_s_000163.png,8 +ostrich_s_001561.png,2 +buckskin_s_000031.png,7 +cassowary_s_002024.png,2 +fighter_aircraft_s_001009.png,0 +convertible_s_000295.png,1 +lapdog_s_001489.png,5 +delivery_truck_s_001300.png,9 +rana_pipiens_s_000379.png,6 +ostrich_s_000026.png,2 +fighter_aircraft_s_000720.png,0 +supertanker_s_000275.png,8 +ostrich_s_000147.png,2 +male_horse_s_000742.png,7 +monoplane_s_000877.png,0 +fallow_deer_s_000351.png,4 +automobile_s_001645.png,1 +walking_horse_s_000071.png,7 +stallion_s_000015.png,7 +capreolus_capreolus_s_001283.png,4 +mule_deer_s_000357.png,4 +dumper_s_000805.png,9 +trailer_truck_s_001350.png,9 +green_frog_s_001384.png,6 +rhea_americana_s_000436.png,2 +capreolus_capreolus_s_001605.png,4 +auto_s_000800.png,1 +tailed_frog_s_000246.png,6 +cervus_elaphus_s_000903.png,4 +articulated_lorry_s_000916.png,9 +bullfrog_s_000797.png,6 +bullfrog_s_001028.png,6 +ladder_truck_s_001799.png,9 +toad_frog_s_001786.png,6 +wrecker_s_002395.png,9 +dump_truck_s_001363.png,9 +canis_familiaris_s_000450.png,5 +lipizzan_s_001223.png,7 +station_wagon_s_000464.png,1 +american_toad_s_001003.png,6 +dredger_s_000486.png,8 +wagtail_s_000747.png,2 +dump_truck_s_000163.png,9 +mutt_s_000997.png,5 +dump_truck_s_001097.png,9 +puppy_s_001045.png,5 +tabby_s_001593.png,3 +broodmare_s_000179.png,7 +car_s_000040.png,1 +domestic_cat_s_000913.png,3 +alley_cat_s_000843.png,3 +truck_s_000028.png,9 +estate_car_s_001092.png,1 +arabian_s_000782.png,7 +supertanker_s_000761.png,8 +garbage_truck_s_001211.png,9 +arabian_s_002303.png,7 +red_deer_s_001101.png,4 +tabby_cat_s_000069.png,3 +cervus_elaphus_s_001124.png,4 +trucking_rig_s_001247.png,9 +pekinese_s_000046.png,5 +police_boat_s_001118.png,8 +fallow_deer_s_001785.png,4 +camion_s_000599.png,9 +tabby_s_001774.png,3 +spring_frog_s_000407.png,6 +wagon_s_002463.png,1 +station_wagon_s_002537.png,1 +elk_s_001751.png,4 +house_cat_s_000064.png,3 +lorry_s_000562.png,9 +delivery_truck_s_001587.png,9 +wagon_s_000378.png,1 +trucking_rig_s_001431.png,9 +tractor_trailer_s_000653.png,9 +cassowary_s_000194.png,2 +fawn_s_001418.png,4 
+mouser_s_000792.png,3 +bird_of_passage_s_000006.png,2 +sika_s_000337.png,4 +dawn_horse_s_001453.png,7 +police_cruiser_s_001385.png,1 +maltese_s_000562.png,5 +wagon_s_000572.png,1 +liberty_ship_s_001456.png,8 +western_toad_s_000622.png,6 +house_cat_s_002004.png,3 +bufo_bufo_s_002202.png,6 +tabby_cat_s_001983.png,3 +fallow_deer_s_001133.png,4 +red_deer_s_001719.png,4 diff --git a/datasets/example_images/alley_cat_s_000843.png b/datasets/example_images/alley_cat_s_000843.png new file mode 100644 index 0000000000000000000000000000000000000000..bef5de5312a4b19b5175e13de430ba205bae1c6d GIT binary patch literal 2396 zcmV-i38VIjP)BzXS(yfHFj0An-sg*q)d?(5qXGW=)pn!5+DwY{8~R5&PxmgiJTyjBav*9 zJy%tC4R@+LoFNZU)(`j=K49TbUrw4vH~Wr|5s@*5F*e9zUopkx@-}CICqs65ab4s_ z5sifY+h4r;-9P^Ji!Z+p1*|@N`*8jC!{yx%_xZBHrW04!SLc^EhBFcIC`|af^9Lnm z90!Qb7;UYUQX(QE0)SEyedM9+fzWgl!T^<6KF`82}h#h=_;~5daa9005XXb{vS`e)Hw;{_dCaabk3D zksNV{j4`P=_W}A{ZgnrEVoVzC`c9X5!2!Xcb*PC*O364^O8qE600+Vdfqv@yUmnE2 zd~=w^mXQTiULZR2U!5xkW}RPf*4hmZgVD(rKzpMM7c-XaOA% z0uT@yj!n;B=l}?jBh!Z0YbIbsVRN~6|M{o?`TlDCo4@{Q8YNcix7Rnn|I?o@SL?&6 zmm&jagiu0w2xP1UT;E(T!*Qe( z-mljYVY_d(+v4o(6@=FkcVRCsZXb3HI17YK_qx^a;{yalz|)h{pMLrB5C8m!k9W5~ zB#22Q-6qGM4S(^g(}NMRwniHsA04s&z9-W|Kgfbfdg$CZxy0CDbF z%LoAh01*J7>$=BJo__V!&jxAgELcY*gfG>rmq*7>k|fLK0h^0XHMw+#1m}_~GF0Cm zCG>1MfL<5ne!JV1b!{CA9t7Tm5E&sLIsjJ`We`TE&z^t&?8Fm(-q=>?(^GnS?1zDD zo66PY>-q5cz)u(jq(TuU!aqvQ##u-3PEL40*KQ@B^gv*CzQLG1P zFd1f}EE^7z$uyqK9I%JFTHaoLxVn4j90)<2$QM!xX{_OlDN9Y>thQC&7;O+qF&;7I zAqoC};W)=F=>TIagA-+wgwx~=Oh2}h#=6XxyP>+Pn> zyxMi5PY&8T&jPV=tKroD z;qAj8zgxU{s2t4oQ=Sqm}`}du;iVMM)%SB!kSvoj5J*{_FH>)D@o$!KU zT`0+)yf~9#)EZ!1K7Kq)e3pcw>h#6(VZAHLx@wx%_6{8My|&giZEIYSW#VwgFBTg< z91d5@<;ls(eD)|x;(eLv{K81cvg!25kHaiE7;SeC?Y{CkO%I0wGrMj5{kz5ex-5#? zXdS4q(rvGG5T=~7eOK3&Yuh#+n1QwY@aSmYv~id@ZMT~`i82*;C_8U3o*$h^71a6i zcCpGrF&!tJX|5LQ>+6TNAMW;TM@WI^`JUJ4o`hr^x^C})JLlC+jijf;kh(S>jx&o) z`pOHvM(g#qxc>ffb$-(o6*{xHy;?1AqsR+GZ?~(rmHW&2?Q*@*);a_W)`4Nb(lDJ2 zhLTV+8;5+5MWGi-XZiO2g8YzEXKb2{l$6>v<8f-itk=1fBHwFc3?B`%kJpRILF$WG0|Sr5(V0J! 
z!-3fMo zGydrD^CEBUc72!E!`UN+y~AoakAi^h9~#ocv3Azn+}@35v#v9{Z6UbV)~&WCilQ{~ zo$lhq-*=$cAj;-(eE8MZ^G8RMG>yF=<^Z(1`Sfr9@~<+aU;X-Tjt<8RmhBFgAMSk> z9v`3Zwte&J#bQ?#WrJK4b&X6orvpC@q9hxS+NSf9gESaFA3cfELm|8*^=I=WNqK94 z-+cV@!{WQi%$v`qyj!lWu6+^cKGs#9jH1pox-GLf5z)bY*&FERPoCG6d2}3)MuXYW zD4R?jB+8$7K@`WakkV-5oY@wYMGv{;m*?Ld9i&ZT1L+GZH_QCfc5!epe0n@}+^c)$ z4^O->jiPyMB_MnKSBh~F20<2vp3e=^zNO9(A|ax+mJuNeBDdLAUElIL-@bS{bA4Y| zMOke7zDtK$JRV|@MT1B4&t7NQ3`MYbXsQm8NhvuJNI|}041gSP<~n1van4d_T+ifBs~i4aMz!=|gz>^5x^t&a!Ce1sPKj0IwSB$SFlka^+D~>$2`m zZy69r2z>MvS&ObuVZF&Ibdx0^IC*%3S!@%2Y zHjAb6ElO_KY^ksMJeJ5JVQaP zQoJ6S=?7_#DUy;@Nu^c;2Z z?4rB9wVbQh*OxR_)GHMjrxC>P!=sUp(vLp+sMYG(rvB>1Up&wI_-{TG#d1NAC`Ngb zpuDIws*dm30g8h_tZng%fFkqr&;M?*m|whnaWj6WZw!PYL1;mOOj7jj<`vxT*ZZyB zZ~yV1XrU4(=*9EzHkKol+?83;{kK-@tx<_}P=6?(82=#MM0QV;OpDxL4u| z3{Tgp?kCYvU8%IItF;Uq0caqQVfU6`Tw><6vZ5&RCr>`@G<)OOT+^*M zNo~tQSpl2XhNjK`^O-K6s2% zoT;x3-L97TH1fTS`STy2W$3`$rUjbLa|y!*Z81rbs4Q1h{dTgl-o3p$KR;5nxuKig zcB|d)3Z+uB-TL)!e)HnHufKVD0aevR2_cl~-CD;3@lr#tFTUT|t}~1X=?V_X|M=a1 z|LE!Q!yk-F9K|sV%d;53Lf7*m+{<96*J!os?RGaoh-QIpZwg2gG|QBwC!c@b9gOfK z45!oCY`*j(T@>)aR$DHU1n!eGhvVSvd^kMpq#4ea>Ig-qb4RlTB&mg}rAanw^kk8> z^aVqM-BF*Sp=GZ&t684-5-;RA#+6F&`1GLF>3XgahZ<%jJTEs}RWdhIfQOD(!t-jG zZMT|IN%6AHV!i}`Z8n;&>o6?akZULBA851pZ(e;*kr3bzV0eoB>o+eLuF4DI$~15c z1MNnmtdzXK4FbOaMZ436(XG~C zckfYJ5SqG#092GZq?nuW^5vViUJ7KT2cLiYEr}DS=f^qCOG=&N=q$8CFCiebEKRNs zw)Rg@Xi$(W2=?Ik0FoI7l3GP5mlc#jb3LO;T&z_vnu<}z3N@+BeEjK7S|pafv=&pi zUTO?WvVy#Oau48{wO%;uDI|#m%SuXP_vj=HeL%znrtUn=p)*^F_T~r-fQ` z*uk>U(rzS~O%bGNK5}E*)_l(ogEY+o!*V#joMf4y>39J>&o(Tp+3QgPo&jRAatKJl zMqRo0@IC;^X7SE9?L0yp3DP79T_56`B8wQdoM*sxV}}nf-re=eZ#V3 zt+6FYQkb|=5+#AFFYcl!!7-}Q-~HjEAHoOsPODY^`tsuL_BM+$hJ@v6t=8^MRbzbp znr68qit{{0c~)d73d20l3as#K!?N8B!%4{JIq3AZqS&>xx0~54E^|qWBzgCfPk$w_ z5ITXm82SqE%3?1qxxv;XtxI`Vt4u8=Cq;V3*QG((GB@D+^>QzosFha%w85rYMQ!Wzn`Nh@7wA>)?cK5rTZcQq~ z92ZEk6xaqz5RVgl31fLyTdgRH7b?xjvq=hKcnWfm_%KeQI7PBjX*Rng#|9zFvykR# zEGvxJ3}^USvr!NTEQE_5SeT>2S8Z%L(H8)w8N1&@^M%LFe#{00n_# zD2xk)K}blQ8qQL*hqecUJdIU#5k?V&G)N;`w`_Gn(F|AV`+iWXmPVtk*?c-(O&~+V z`D_;D_;By=;?EQ4g@upwjZ4t8QX`1-IE`Y*wn3KS04vfQ2bf`O^tEc}Dgkl=P#UJK zmU_8Nl%+C;!`tyhlK2!6CnqPvK_AW+liT;+_RdZLh%lq5Jd?_$o~Yd2E-77SCEB%Y z&-Drfi1j82lQ=<;B;pX|c}7A&61i@gy6*1oA2;f?>B>{rwx({Hjh5%SVVL0UM#J{e zTHO>yW>|h0X9U0j4*WD+FPBM>hB0zNWQCF81WphIVH!k;;aRa%B4{$fv6W`$0Em@} zAeFHKgnsCIJ^^97H|QK3Lzx$A6)DGxAofke#xa2;UCW#l00<&K9% z0cq1gIAJkVrbuOq=kC@w%R4JeGYrERhRL!tijt$V`+KJ+lbf5>Y)wI%=TN)f7s_p2 zceeJFA3Xh)TyL@rtEo%J^&lq;qwNDKb9HTfuy;IN&hjj%lq)>XVIvitmUqz5WL3Hu;cg=>}9Gr{QG(%x;bgS>7&54YRxVy%cfzX z2pNl+WtiQq1IN)Zh#TtrtMC3`sGBTJ;MLU|Dv$T7)i={AB}*X+DUNgP=x$~JEVl#i z;9$4gZ?D#Bx4M1!;7O;y6NCvUiZsLi`sJ6?yIWBfDF8RKF^)m8Qd!U6y?yb2raB8f zm*hD(p3WH@uXE)TCwWNml4$6*QkI{+nn)tUGSp_RZVv~;qaW3K`@`KM*D@T_LZ0`> zFMoeGen(R2@Z?^#JtR`3&TrQ0+%`0OqiecpTc#iiP!>w2AIv;&Y5Im6Ja}+ch@jJ{ z0pP#8dXp1MXK-)(_(|yD3@(f>-kyJc}QH^l@i+uy^`lZ?xz7k)=)iybu+M z7X;r~VtLf4id99-i{keBG6};xOAU1s2A34aVFg}O+ALF&MDhOtPJZ_iu_{$S00000 LNkvXXu0mjfmxfkD literal 0 HcmV?d00001 diff --git a/datasets/example_images/arabian_s_000782.png b/datasets/example_images/arabian_s_000782.png new file mode 100644 index 0000000000000000000000000000000000000000..79b94674fde254314699fddc9b2383f2606ee494 GIT binary patch literal 2389 zcmV-b399yqP)arMRsi6&*B<_%|DIbd$yOv=a$@Wx4#qxCrHVQSE~z@N!2|I$TtHF9;es0qh~s$B zV9j%@$A8?zT3qlw!6*LVAOA@R5CVyaDNu0jc6+oon$4zFT?>pfC<6e3&>(7Jlprz< zv^|!uU#_JZss_FsUCs06GT5h-0B7&mQjc z`9d+NmJ)~}YegsZ2k zzL-pA57VW$5T{XURMPIPt#!6GM`y=pNYfw3eLx&;g+VpjNK~Siei;lOpwI!+HhD`OIG>h{_b%9Q$D{p!ii9* 
z(9Fu3taaDt2R9d|nS4UN1EI4%90Y5lB;FkN%IUO)uH8)*-Wumx&eEc|np@dxMT&NI zo_4#V`D|vZDsIc^WLhrEVp#-hgI%hx`@c>m56-#Se6A&Y_x#D;-p=0cu9mv-E|I7Z zo;}?<-`)DWf6(o8UOri$A00@55634PfAyj>>ZVD&v-SAs%V$?ty#Jk+=Rf_jEQ1a~c~=xGwmGlHwyJ*o{zYrNktA_bTk!R>-5nHa zv^g5B55Il7{cr#B&v`!m;rVw;C~sZf>BR5;@M@md`}@a+AU5icZ-3d^-txgq)W}TC zS*M+JvcxDZiZag!&t6VetLw{4>zqK@G!0QR#&KQO-EQyo>o@!RAIa5fW9vDSU;;$- zNQ%dsPZfa#k@dTA(k5aS;3))Vl#+$8Afi^cmknz6&P)+eNhPJyI-X3X7nc{?Pqwte z;c)HQ-gBU!FzyRRU_?M-+Z*Ue8NnhjQ;@_!%tS$;(8?$jfx!pi0ylv=-R{PCq?8hX z%p%R&C#Oe0!XPq028y|IEm}3K!HV`-%suX3w7}IqePS_fdK#!rBWi&#z@l< zBl0x{4Z>Sr1xipzh5+c5fDoW5z4wjLMk*PY8f)uN1;?3|Rrzpu`JQ!4eLA}YfDl3m zL84SBlwz$=p&4{G_qP8y=!HPGar4-a#0Vux$&5a@ke5YOuCgpk(-sjoja}Ilkm^Av zpj(B7qf$Yu1q^}$_z(hz5DHfZ>pfNXl{lZ>Za%WME?4tOFAGtWDUF2aoLlCrRlcI& zWh5g`d|+qk?%_U3GCvqMf-3N(&s8Tei0FN&>k3$akdD~~5P*Piad-B^vm#i#n9qa@ zY0IM&5d5lG?VlWe`MM9lZ(jf5)yvlg(?EQAbFsep$RkgS<^K8M+%A-9ToT99M7eX$ z2hY@PC5@LZuwXg6y9!Oojt{opbf7nr>{aZ;qZcu30P} z)*r3=;L4^dY@Yj4-5lNq?}<5t;9WrA{vhjgGDSGKySr}f!{1d%>i1uNJv}`fj5ZOa zu+2PwunDZ~ZlKglJ2^W0JRWUYUwT)OuY+@SU7_MsYtSZl4d%q%^}fYoxjT$WAl z>aww$MLwT(vo*ndc5!-fd;e(tQ8zY%LkM(!b;hEIRq|i|^Pg{Cz3cbdsWI$*(^QN3 z+*zj$s-nw8GLRu+AA$#FZj*NcWG2xIbJNTQgRO7Be{n-M%SB^zs+>(R@(@;a4GoiN ze*gU8!-xO<+aLcvwcYEhNe#apogQ5r%otG>PzELtAPoX=9XvCONGOFKA%rk#7QwLT zCEPa(M41<*^=TAGIvxzWEFDQp0p6c{ezEuF*>^Ag^6@`D-hVp3KSyO8p^B6S2_S?3 zAYhgX5dpwC$3%f?S8~IX)27J>rhzJTz6j3WKTfzzg$EVBd+iJ4=>>KzBN9V^u z)#)&y3=Y3j-6^WtK0 zAzp`$Ok%5QM?~a^3R|g2lXadgFfk)Y)dVifMu7bPD3>0DL9LXM(ir2M&GWo2%f!U7 zHsnKOA`Va&^&snIB0c@>D3ne``e8nuEf%eIn_RG^vo+;~bD;X0kCaKs0XcvmjMA~u zXn`CuXypW}#3X6dPGil45VVnLW>i8Bh&le@+gERYew+4NmYQVPdH%yon^(7omv{Sj zcl!^xQXB1&1m%`Zar&TYkw&o+q=IwK`^ZFEX#ns(AV4cg;&uxojh=V9TgNxY?+-rg ze7hZWqR}^lhKu$nTi;pl9k!F134rygP6Z|?jD!SLGh4a7@ex|@RH`GAIxY^wkJ&KUi=TQ+RKw5jUftdubAKF1 z;+w}Cx}}UU#zd-}qyPX6At0Cl0*%p3%m840s-vn}d0qFnvbmiCfH6`=hLpXyJ2jDx zTbd0I#=UMwPp@y+$33b2M@%Bx4;uh) z>Jbr`8GxHPfCHGCm8@yVh{Q1xsrz7(HS17}-8>Bg^QfvS3ygsf(Nrk}A_}1j954t3 zFOrae13Nhpsj8|18h|Pqb7YQc0_aFkg=j?Lg;P#yW;r7u0!S*=s3Ig+>vIp%XpPKR z43Y>Kpc7F@NmRw%1EU#;c*$ah21=*|R(VfY0|AhkrIb=CA_|U(gctyWsbh!^3=~CRNa7w?RRI8rhGC#s5e9GwNM;&{5FHTC z&KFOfJW`imfBa!^F;zEEF##h&BI3ZtSc-r%IH`eoOevWpA__6a$WcT}E^g+k=q{y5 zDQd+*hm^nm>$gAt@MLkZ0U*R+4i11ARLu=IRxw5jfiRkJDHyor(5fM!`Lt~-CMG~D zV1j0nvKJ!*mxM0Q_xF!qy?*@k;LESR$tA>kY)Imm1yaVW#IeQ@s!`Q6BT#h#bPl0H zstDYr6qQ2g06O$*vjT1ij_%}EhQ9AMx8J$FUUi>;{-IW;&gvoQJF&DP`H3%&f@E{e$QGFTVKV%b(wV zdo&6m#KX^ltW2_qPh8*LlnY75GZoNPc4y0K`v8w9OHk0Y>m-p}IAq_cKfBAa8 zJ$>t~8z(2P{{FY`mWyr}Rx#419~R5=y&E_8ZocDA9BWZ1qD;&N)Ql%)7SzVmIl0C- zT3jq+9JS+#j2Nppzj{?t+PQjVHlOb9Zr!}KH>CbzadvUujViu!We(`a$Hzx6Uy10b zX-g@H>~8A7A;u76+VrWEW>iPy(WnCCK%p6rdo@+5Vwg6wrmk;Xzy9{mZZ#uXoF8lH z$ji{@lBdJan`jz(RcqRDD1}WMB2{IgaB;p^uh;Gtsj8bQ7sjzFI1(fc+;&94-d9?uTG}Z;;P`okhpE?i}MTBf`F%| zuVqr|x-!$g-{exp(`}Bl*{qHZpC2E+Y?@}VTt9jG;QV}5RZX`!F~h}b$uYeD)_Xv- zTrQbIN<-Fy9CAvWhkm_YZf$Mll%$kcv8bffn`_-vMbdJ$esTD;sj98{WHN0J4_`cc zcCc7nw9Qyli@LhCb$jRT-L6}470=F2hmCQq3@{cHf=LDlVRx1wUm_d zkcwU`PR(=Ej@8PK4}M5PxpL*ocsgSa_ddFNbbNMne7t+@+T9QCJ$?Fk-K`!zehBU< z<<)W-n7IsnU_Lu}jfB(jWW8SXednOL3=ZD)>Gbrtig5U1t(N9H(_tukH?LQ*nr}~M z^V$CX*{j#5P22W;U)7_o@4BHMH%&mAG&Le|DS5S80$|hBtJN|UNokYvAW}l0%kO%A zanUVT?sog#TSc|4w@T3veYrmS;KO?ti{)msUUgm5w$s^E%_?S42NO}_b=Qds2ktiA zdeb8XcNJ9tGzWq(9nZ*p=yD*O&36>QjYi|pG?l7Mw`RY%^WMXU4`MZ%ZB02cg9joc z2X|Cs1#<*(MMEM$KtSYJA%Z&~AXDU7J)K-VJ3YfNo^H)fPF`_D zS9X~h(2#(TFajZ&8Ur{W5FsKW0)nerDVc~6$&rad;F^hwod>QXH$Oc)oQ~&{YjY7{ z=EE~56BTna0{}N;cLR4sLWD~JcRe|Ig$QjsIbCd4tBzv?ht+E3s;^(2 zEzXxu9zQej?VWku#43h6ciwHA(Ppzj0yRKzL@-lRW0MR3>PCox*_;Bx&AsagNP&^- 
zv$Knzo;~BpdwY8UVgUT@r=Q-qapU0N;K`#$vw0giynpATGz`l!gb>gTT+s~7z?EGK z00NjJT5$jZ01g2V-E=-_fAh)5``fd=??3zOv&WAfgZpGWxw*G{?dtY--~IFD(f+8a ze*Dk(qpAXc0*FX}AR?-2W(;ZoVD5+j^hUr<;7th4b8~R z%+QRu9|lBX4gd%Q5D*|B1VjQr2QqgAL_`M%hf9pRo2j|GnS+~MW)ycbRZulE7co`) YANQ>%M|v=-LjV8(07*qoM6N<$f(g#^WB>pF literal 0 HcmV?d00001 diff --git a/datasets/example_images/articulated_lorry_s_000916.png b/datasets/example_images/articulated_lorry_s_000916.png new file mode 100644 index 0000000000000000000000000000000000000000..9d5d1a1406e4f80e25c46aeb1ef73d48369794a2 GIT binary patch literal 2357 zcmV-53Ci|~P)Su|NWe1ww#e z5g_0sb`n2hJB~em%y@cU{kq+^`##P+r;5cmNQh6htFKD+eJWKU5jp1&5&jp<3;-y_ zG|iqp-QC*S2s{OZhzyK?3`hWqPsjiOfB?))@FLFuQc8u0N-6k1xG!r!ND>OA7E*x( z07gOt03=2M|D}Bi0$`$&NGd526#+Qw5Rn0dc){c^MW7Jm$T&AOMSn7=hG7r{Y!MwR zjeslx1+oA@1b$J8rYMK;Xklp)rBn!jN+JLQJ0fBR0D(dAO52` z*ShfX#m?fH$g4G45g=3q`buaC$jpd{fB?jtX8DOSb$k1M7>3P8v*HDw@)Y3797Iqu z4U?qXJKWxXdi~DThfnScC9d53=ttjs`{Kr{@i+~_+H~AsY&AP83!XyboIq4kxECHE z0;u2o?$=QidD>f9Tv=b+Xf#{(Ml+1+Aob4S-h*e`-Q$Bt+xMS$AEOT>D9saV$}%fQ z@vzov_WMT?WxZbWJ>h_WISc{k5uGc`oSkqIz_F`_^;*>f!9q%AumnNH2OxIN z5Lu85%WX!<0IDdF71g5pT&rmVu`}4~9vzI5xU>Z#pg^glE1h*@%2J?E2*4yzkX0~8=XgnLHMOtzxl|+FIfFP`8W?*1q z1|UF%h>i;5N^4wJ=1CIwdc9eigpoHtx3IXl;(MV~QVEZIP;>3YFLoY3-g%Oj86cqq zMj}Fi9spVbax4XcKoH2xj=(wBA0B42csLpk<3W-P<2W7+hJ(Rzn&q9f&Gq%Ic4zg> z`r0=xUs504ygHdp2%J<3g zlS(CE!a?uQI@dq!-o5pys_6Mf^Ox`bLQTyCHA@uC1Ox~mQ2`<`BY_Z%8jU5#1ONz( z000<8^NX#OWRgF9^dw39)wPwSrFNN5t+S|95QYVn(|q#hzx_!`h0YN%03$LWGBYyB zz|%EVZ8Yjr%DTs00wzb&5raffzE@SAwyp%{l$4H5wHBgO3?wVT{OU?p8bNgL;e91R zJ4;FkPy5KAJr&hj^?FkRdMr=1+qbqqJGlbD%tXWjQ7ENQ0FeP80buf6N(DkV2S6@P z#${ z69AtCODT*ghNE7i)p+IdYf&`Ej7kT!S}O=5B|R;*uYD#!l&U`%fWwMko5YhSYBig4 zfZ)LCFqqD=(KL2!Px>-35->9ZW!WrA;>KL-?H~W-?u`$A`}&wuUpZx7@CVASgm zk56|_?|1i`jYu8!4wss%i;K%8=eylK->-NTZ!qZ>`QSV@9_sLpkjai&JwWNmQfiX@ZI=21MKx#=`nliVD z)`GLvxq`qEStfh(4F!s*J{JTPIkntxY837SV0(im`3`i`6vScMnVqsii3oC>uO`#;$BjrjL z)qFrSrCD9us6>s!?xF8{fd1j(VXYd@W=SoqDG5MKzzhIDEQqIZl8sVHEQACA?I}W; z#+jKJsU@SEO-B{q3SWHp&2P>!{`|qV@-+#eJ*~ATWSFL-G);+EYhRf-V+0|kBd0wj zL8ZxTvOkd|r4#^YodX5v5F8pTyo$PW<9gk%jpLE>)OX(ef#<>Q&f`kZ>pg$o@AZw% z5wLKilq~W>P4_3pmQqO&LMqf!+ex8HDNrjVq>x}sG9^0(23^s}n2h6FH$VRO`~S2= zQdiELJ@@eQ+qJM&5&o-}zv`S#(-}DzMYZ|)ma1`eoF&AL*cw48%{oh+ubn-8`|h2n zULm66qaiyGfGn`ls4Jy><;BCk$#WKX`}U3bxfT_(g?6XY-YAMS05m2~r`gitLYhy6 zk6Kz5)+utjq(%_E^~1N`{n^h~JMDJ+R5kDS_qLQsnQadCX-}g=~Nhod(U=upFh`rWo}_fYj2ViVXXzyo?4xd8Klt>M4@^|CN_7 z{QbRu6{h&&@pfnJ?5X9K3bRiv;?a0KNzyEDFE6Y1^78y#gPHO?ozBLicrYCHpjXR|3;V@*aV$nuR(u1g;(-d%9EKR7C#+1y+i$HTEb bRucaMC1I!snB5{K>|XUm}mDaMsX5O7tjq3(Q2nHhOy92Wu;Q72gH~};8{#klpvuDAp~*c zWXQeuz0nN-$&^jW!o{LTbEi>AL8-wFf~-Z1lCxSIcwbOL&Rzi(_J;Q+)n33rvcg12 z26kO+*4=ngCyS|%PV?lRbDx^!)djL=su{osW;V9yRJ{kVoY61 z!qn!z?gzvC3!M(VTpPU0Fi#4k%#Kx2hE>x9H&~m!&Yf;V>u~D^`S|Sho5e+049pSx zKE+-u)x(2dPRI9xPV2$V;9|(Jce7g@?pUp4QN~4$l?H)@9j-X;MT?@;k|Aae!{&#@ z?5edq9Nj;D@8iLHB6Pq3I2@*T_C74D!ML1=90MLCoW5Q!t+Jvt896ADf_G!KNhKCF zY{WuBRRuQg<+<)j@b+|>Jn!xwJUp5hq8pq7xQgm`Pyc>=_^4^mm(5E!%VONehc=vK z@B0m67cVVdSn+6jF$#?gDtCtji>r&v%{mRCX7(!%30$e)bokYU`6 zTk~bi?f?rAZIne9&K8CFVo-R=nK1yhVFMhmv;~wvf>rp2huf-Sh`FgebaV_KNtNjDSzT0e?@pyW0a08?ha0dwn@sSXH z(~4+r^~tyM)02fNZ+xIy4ue_F#b)vB?IRTRTPnPXh8 zR`sBs?e01Oz}?NvvR4D0&32H{Y?gg9_`d$87+;|9?4~Jqmb~|N?`Y?<&py35JDTn7 zyM(rFbIR`SZkCd}Ioz_*5i~fs%_5RQ2`W1~lby*l_Wf$LZ2k4-pO-EBasq9;F2}f9 zZ3Ol4?RWMjd$ZkLz|ArMfCL@xAmC;WcM%~8fFeSQh={0$qN)JfYxaKo>)(Cx)#r<= zHG=D6)25WuDorNS*_~1LkOgrA@!|l==*s4;NH%3 z_sPkNVldRHPc)R%ayErNetPoiYMHX!#c8H&SvoH_M)<_0Y|mzI$+dTofV3PRas0?qm0_FaPoGgS$t!4qu<1=$)fur22BP 
literal 0
HcmV?d00001

diff --git a/datasets/example_images/broodmare_s_000179.png b/datasets/example_images/broodmare_s_000179.png
new file mode 100644
index 0000000000000000000000000000000000000000..319f8a3983cd92db8567600fbab7dad495ad663d
Binary files /dev/null and b/datasets/example_images/broodmare_s_000179.png differ
diff --git a/datasets/example_images/broodmare_s_000313.png b/datasets/example_images/broodmare_s_000313.png
new file mode 100644
index 0000000000000000000000000000000000000000..87f106363d405f5d54ca10f48a48c3ee384c7dc4
Binary files /dev/null and b/datasets/example_images/broodmare_s_000313.png differ
diff --git a/datasets/example_images/buckskin_s_000031.png b/datasets/example_images/buckskin_s_000031.png
new file mode 100644
index 0000000000000000000000000000000000000000..276e335d0273c49b98837011069c2a9d3a70e441
Binary files /dev/null and b/datasets/example_images/buckskin_s_000031.png differ
diff --git a/datasets/example_images/bufo_bufo_s_002202.png b/datasets/example_images/bufo_bufo_s_002202.png
new file mode 100644
index 0000000000000000000000000000000000000000..1eef4599aac1c31598b41b28b81fceb2e7cb12ae
Binary files /dev/null and b/datasets/example_images/bufo_bufo_s_002202.png differ
diff --git a/datasets/example_images/camion_s_000599.png b/datasets/example_images/camion_s_000599.png
new file mode 100644
index 0000000000000000000000000000000000000000..86b77d960dee42f6cddaa1d069e4c52f23c0fad8
Binary files /dev/null and b/datasets/example_images/camion_s_000599.png differ
diff --git a/datasets/example_images/cervus_elaphus_s_000903.png b/datasets/example_images/cervus_elaphus_s_000903.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac4d5a00d7cdcb60973e5323f162cc139cc7771b
Binary files /dev/null and b/datasets/example_images/cervus_elaphus_s_000903.png differ
diff --git a/datasets/example_images/convertible_s_000295.png b/datasets/example_images/convertible_s_000295.png
new file mode 100644
index 0000000000000000000000000000000000000000..98fbf9f3ce4c5779f45d7eb8b4641abaa4a8f50b
Binary files /dev/null and b/datasets/example_images/convertible_s_000295.png differ
diff --git a/datasets/example_images/dawn_horse_s_001453.png b/datasets/example_images/dawn_horse_s_001453.png
new file mode 100644
index 0000000000000000000000000000000000000000..db4fa70890cb4e3f00395350666324a6859a1029
Binary files /dev/null and b/datasets/example_images/dawn_horse_s_001453.png differ
diff --git a/datasets/example_images/delivery_truck_s_001587.png b/datasets/example_images/delivery_truck_s_001587.png
new file mode 100644
index 0000000000000000000000000000000000000000..a86fb263d301bdc04b4c8fd5bef772228dd2c922
Binary files /dev/null and b/datasets/example_images/delivery_truck_s_001587.png differ
diff --git a/datasets/example_images/domestic_cat_s_000913.png b/datasets/example_images/domestic_cat_s_000913.png
new file mode 100644
index 0000000000000000000000000000000000000000..305c578411cf07d55993461f169b5d033c8c97d7
Binary files /dev/null and b/datasets/example_images/domestic_cat_s_000913.png differ
diff --git a/datasets/example_images/dredger_s_000486.png b/datasets/example_images/dredger_s_000486.png
new file mode 100644
index 0000000000000000000000000000000000000000..6876e9574b3badd91de2560ee4c332fb704caa9e
Binary files /dev/null and b/datasets/example_images/dredger_s_000486.png differ
diff --git a/datasets/example_images/dumper_s_000805.png b/datasets/example_images/dumper_s_000805.png
new file mode 100644
index 0000000000000000000000000000000000000000..3c4d3410f40340baf7d2542a276bcbee543237ca
Binary files /dev/null and b/datasets/example_images/dumper_s_000805.png differ
diff --git a/datasets/example_images/fallow_deer_s_000351.png b/datasets/example_images/fallow_deer_s_000351.png
new file mode 100644
index 0000000000000000000000000000000000000000..8583e73a7fd0ea7a6aa11b01bc382b63bbea075e
Binary files /dev/null and b/datasets/example_images/fallow_deer_s_000351.png differ
diff --git a/datasets/example_images/fallow_deer_s_001133.png b/datasets/example_images/fallow_deer_s_001133.png
new file mode 100644
index 0000000000000000000000000000000000000000..3e8fd596951bf51bec8c6ee5d37b9e5d9b775544
Binary files /dev/null and b/datasets/example_images/fallow_deer_s_001133.png differ
diff --git a/datasets/example_images/fallow_deer_s_001785.png b/datasets/example_images/fallow_deer_s_001785.png
new file mode 100644
index 0000000000000000000000000000000000000000..9abf685b9fd8f10accf33903d81f61ec4ffa9eea
Binary files /dev/null and b/datasets/example_images/fallow_deer_s_001785.png differ
diff --git a/datasets/example_images/fawn_s_001418.png b/datasets/example_images/fawn_s_001418.png
new file mode 100644
index 0000000000000000000000000000000000000000..e004bc8f5518985b4133ad1bd9112cde49a4de75
GIT binary patch
literal 2256
zIp$Kh08=i73G24YF-RtgC9MlBuCI5dCX2F^6myWZ%|LCl|NYI)lH)S2ebxj(@oXg^@EAgFKHStA^Ek$C5FUhe{~br zl$JD4qt<$RIDjNkODR>kEce2^l;;{Lmdu=z5=~6r`I)E=X~J=qPv`cqS1eFg52ydt zRfnd@vdXAlULL(0_ru+^E=rrKGC3!q<;~rl>04P_ODVHppHEaU#)M3Lf4!8c-X6N< z`ttbs^5MUw(VD0PA5j`x3tMMhCxOd+oYTCz`TYK`(WeZdZtLlCY^tFex}`+v)=)?z zFrz4iA;22O_sb8qKK$zDUx&JfPqSYWwn``9fqA)Cm5Dhs3KWuz$-r}5SrH;<@+?3r z8)9-P+IFK1Mi&E=>|B;nYSeScWQ_pTZT~*39_ZdG8pHF&zc&Qd7-(7uOJb3n7h%1oc?M8D6C1ru+eN@J@O)H67;IUTQn8eTbe>ST7>Z^ZP z(hRYT<7X%ljo#jV{aQELh=+G?-SXu9GG{01S}G_Rg(Pi7MOFd0P}1t_R#i>QLKTS8 zXBK92_xhifDH`(_Qwr-EbKdvgnQ}<(kulrq)$3QcQ#_7wUFUht$)~hYL^1)h&p}j( zAURpg0Z~Y*ciVmni-#nab=q&gb!)rnOT_SWdT`}&zdNjR**6EX{e}c+(w`q5qq;1- z`W)66nK`EjoO47|3zC>!Ax)N)5SzB$W>tttgOWKWKV44xKGW`}&)-kW@wdPHBj#EG z^z9}WczFEF1rsMH2VFogk&B+BC?N?8h*U}`AtN#gB*KDVsu~0_E9XyAAkE9eh|lF% z57kih!{g~EE4DY=Z(lAx#r*Tb4=TH^IW+Pc^p*$%r;-a7QHmmf5=TrFE=w|4)r}f^KnSr#~XMkM13!}?3pg!#WvEF93JO2p~y zr~mxzn}55$`t`@@rzjjqnYq-u%A8Xv%%X2^LDu6kS1PV)&T%qA%5_~fhwGqHX7AEe zvhZmsKx6p7@c3Das^q-sw$g}rdi?bH``!!~B3L4%!km!{Gb5sqo9dS270jn4Euq8` zqcAAD&CTn(-@HG54=N`qQY;05Gb+uhS}*6%B#NyYZB$i{K0P0&7f2q2V$2|MURUNE zb1=GfX-XkHJv|pabsJOAyLFW=y65AAo2K*ghpd%0yJ361%&TTm!UZr>rI}@0y`J3p zc)Y)sT~oE59b!fnIcDUfC35yB|8brDGA~##RJE2sIS~L~&R@3Gce5nd{;}cZ}x3_r)8^@06vq90Cr)r;Jw55I}Nc ztH{}3JV}B`LXafL1OiZShr3H60C#uD>5?S5BYV8ROGBpWlnpG3ieLa_5*QATsf@4^ zB_|DlBuFLzfQ$n3@&dZtB{z3>xm*-tSq3>s8G?HeC|L+}$P=7Gu);zR1Z4d9sKa7( zSoi9X!z4K977*o5A;yxBfq)DEj27y??-th78Z_9+o+FH~?qGD>Kyr7{efguiB%=VA zySr0P3bE=HfD;5y&(02l90rn*vb!LQqjBu_nUc5JvRiiR6}+@L_}S+5lEwY+f2igUcGeopjhEX+ zWs4-qXEQbQMM_nV3}w#C_EmNYG!nT}BG#=zzS^&PD`nlz<7{`mzgTIHob8n>Z%oeg zSNnYY@UE2)N4>YIFlj@7Iv>na4-gb$3}(#koi?6bzSP6B=j@R&5)3gU_l4v1aPQ#x zqr=_%Nh((RfPp8<`N{5g=fh~@wf;&uTp&b`BIWArB`08#O93!<2k7pS$Fc$v6GD=+ zSn2OvPxHfg^x4Ua|GaoMbyVuDr_*+__5S$cN2wgwh-IO|GlrOz_ z(j4wd5`>%!_vo&aJYL=z8Qp>)Z&L5sK{{%}i<4em*PGiHKK{+en;Ta`Z@^G4+C*BQ zNPq%@$>h1sxppiSz( zKZu1{T#@A2iV&*cgd-wlgUp?6in9ey8q|&2lw^<)Whz26i?gKpqCIZxY0$W*06U%? 
z9oNSj7q9dO>oXhX#;t*w8NtlJ=&r=nCU27*2wC!w0Sj$3OEkG65Q``$zqr>bq`lc3vh&-Nr;4mxH*Y= zfvT_vclZAKmp`5C{ri&}H-7%k`&5xKl1qTY1T2zi=~8(P*_mh=@XVZ$b0%XE*(=%9 zN=Y%AA;-NLlu%~ObG5oUzWvp$UVpTG`R%eQvx6i6c_wL*l1K?|F+_yIm0fN?VG`VB zCI*kCF<3D4Vu-`243-5gEofCy zv53htK)_tet`K1{xO<&FPD&GbH}-Z%F`H3$gVeJ~lHKYQ-YpS&?x9m?TA3rI4DJEa&$J*1?vR8??$XJ@rv zwoN;m&;EJ)R!mJ7!v8@tNUZ&?}5`SC*nhogU9`e*XEl-#(5-sOvdt z?)miesIC)L)r)$vzP^5D?fCEnhGb8DJO2Rl%E~ZU!TKcC(`HVK#sx%c9)cz8GT_LJYwi{`!Tj@n{IJZQGPmRjrr>cO@4f&)rf04~~>` zc2@`pfgq&(?AcyX#v;TrgrX>mqBOG@qZzwQ6@s;GcK6OG0CSd{0utP4LCXQolbKwZ zG|&M-kPxF;AO*SrbgFnaDFA?GAZ=+wIN%W7A;L)tCIO@|3Jy8oMwk&GX@D!cfIyOz zCmN!0IrxDtDoWmQ-32~$%a~3AU6?aQ!sJc@-6w=mMgzMDL_r`rCal8<09{V)ni_yf zkZ9c<|Caz`Nv*g)(O~&K>%h1m*Mmkd@Z*z m*=+)H>3{cE;nm@Qr2hid!zqd93-t#80000K(Ps~?eR2;_>}9}gHEd5+Y)vYLz(5255P%sNfEj?E zis7k%d3W*v;5oAV^qBxyEX;KU03!wKY+gTjxbyy%n@xD;_`8G(@jcj*H%01IJEXLc5iHd$oav2LePWI#rjKfbdzwgf=L z%ml={?#x6S7#I;4(SvLl3lF2Iom8%>y(1(hW)I*Q0{;HpfBogk{fb#jfdbjYN}-Vw zLw)2x=ba1RK5}SAGVgBL-TtSIwbuIpjisXUWmB`jyeJaDM8JvDMO)>uqj6r8^>}AmOls>fAf=sF zU?2NHc^+hR>cpW~3*sOU@bb0g53YSN4ZJfhK%vtKq$vv}oaeG6qT@$rk1x(ki87WJ zxN^3tL(@=Q`Nt2}jfi!slXgnr=Cf9{zTtemxOnjVcfK(34!a>%DD4V~K-U{&=YwjrYy%>-A^TZgwzBqjKuJ zTV3B+Jh=bupPV~!Y({MlNAJG(x3t^+{wv2iT_wtKFBTTcTqH%Hx}4-=7i&p|8w9De zcftFLK(PZE{OIw+@%mcUZXY`SbxL%XfdFfZH*VhD+8+G!XRoQM@mqtPnB}Jr)()jI zn>GjZBqyqM?8##-nxf2lox~&&n>32OstYT}#netcK}cGyKvXq$ZFO}A{j5<8IH>EU zY<#u$$*n|LAGS6&>+Q+<=HpZSlm?ScYMMw-kwA$~duD&P-EL(`yK7?8?!^IZt7%6` zzO_D-TBY5dwH6rm?`s=Z96s3h?B{_}9lvt+#B1MsNg-2}j~b3+=;ht46k?*2R(GFC zvc%|auhWTinx;Z(7aU4PX*jplFyDIg1PQZNC$df;zxwj&Z=G5=bLwj9yp z+P&ppUpRZ9pFLeE1k3?AsI1+2=T|>3T)4J$cM^h3JJXew?FaY4`&O3qduH+QLMLrG z&%pfX;pXFk_mNbJF{WX&Nn{~#q(St%X`+a1U<#fi9SH$Ms6^WN!H?fqpA=W_uShm6 z&}pWk{ZS3ldv}o2Y+RnZ@S~QH^>lpw#?pI)Jt+t(}JxgIg`zteBl_j$H&jUSfP)Fvx}zpa+ni_Z$j6t5VC0zx>s&0f3oFY3DsFl_Uw> zfzpbUA|gX2%s?Hvt4u1c8tMkr0^(5WV--TJL-u$2whP7K+(Soa<$MWJJIt1Ls^Z zn+pJfM3FA?Y3a(zXbMQySqRPplqBH92-~`@hljPG^B$Zt%#E~(S+~(`Fw3QnKb}wX zr=NX^DHebugT)r!9*r(sxZoii9PV0codZ_UzpwmPh0uuO=JH@t)~>3cD#6xqv(s#~ zG_$vsft99izpZ1w_|dyrcW{3(UrE|XcMO z`uKdhR~CDFdr}q=K@z8Nl8`Zgl$u1mb#)=&_k7Wvl*Q|>uC%j zFJflqaC_^rXZh|kFC3jS5ovYWsFec=2qF@E&`OUF_B(#GdCyY6*PR!Imk@jiApI=+ z>5qT(^7p=tgt@KBD5Er1HucyR)kNJ$TAVb<7)6A^2SQw4S7QRMvmFdstSs z*J@EfaRC7XVojulBVh(W3) zXUtre#oDozl}>AK_prCJwz0AK>}dY}){P(%Ku|G(a1cg9lz;>PD$nh-g1(NnZ|x3m zk3kRvEuOV&+n29xUj>vXj*#fosZ+ta(g zSVcNi&XNEEsv6W_d3AYZ-Gvg6Q3wJ;KtR!k6)O=4ikt)i2qJYc~m;F85pMlba9z@Wtn2%`Qmotn;p}%HX{sQhTYl;rYMbx^l@+W|9|BRb;&mAr3xE8xg5qBW@||M3F6XB~sboM5z$Wcxd-; zR7Ls4m77nTIi01(qH-LuG|0u8408bR>fL|5Wh)8QC=a}Ua3GVJpU!48(F{Iv5H)LW zl~GAz%F)E9A!#+TxFNk|9M5mO^JcHu{oC7bU!M$#*+kS(RVOItu&ydA77G_iy$FO(0*1C`7*aMp65y@ z9-@ABWjy{=fzi1#y(-rLC-&U=b{_AfXef zoi|CkdG67yx1>QQDsJjTdkoWJI=l^r@&tA``s}ve8-+Dv?f9leb+f>$H{f7o0>!Mn z_q9rNa`}r(!{HE#y{k|uhonssH<~LaPk!TD-?@NlM`USrJ{p4RHGh6%0I??ga z=DU^;a$iQb>XAcfT>N}wop%_=D9TkBoa$xS?%rNE>!;~zK0d6h)26k&yr$7NdfD>v zwYby4Bz^qZ=aRvR;pAYneYFbV_Gm0ZtQZVVYwj4<-~t&P9n&o}9}ShSRR}DkqF(#V zxreQDB4CSo?ZWas8!M|T+n2U(?A*9{?bp`PRB2!6Vk5z?!JG zRVKiGo|Ajw>6iMw{vUq-3PaG$ij~4NiX#;X_@F3wA7JC$<4I~BdE$xp{(k;XufNvn zwSMxmpWl1`ne%U-A0O;H?^q=iBx0$oAEBN)s`5HQVfMbZ)4jtOm>81S^m=`zjYc#` z93WB>_6`nf@9$k*J9FlN*{u5W8-E-u^)@%p^_P~a@#yeiUl}a`)#2D^$s_u7XEdCJ z(cVs8r@dbK(fb!!xYKIKiOvSu@wF2wR)(0A(jGOVv~5cgT!3&H!b2Y^Zx)*&cf%9&UyF%0000}P)wwmsy#u%RFbW;`9<7R{5TKM)UL-*3(ttxDXtgea zFz`La&Mg-+V@=X%gi*v+Fr#%2t;7FYJ4DQ~Wgy5`${0XoM(K!A3d)N@NecZiP&KyV z_HGzOQ6udyR%4;uXf#;NCWF)SN}J7t-R*pX;jTJ=3xIj_OuSufPXsdNS2=49d?riVQXQSso|M_nCO|SwXFD?d?Y~?u- zCvntiMztvN18b|YSYKY8I%|r4KdZ_{(y?q1*656sk_2GrhZ1FFE6tVX;pF!E_|5BD 
zqy6&5^ON_-itymO$1GG~tMO>Q9*-r7WTzj*^+r<5meZTTpwno4x4ly>=QobMFh=DI zCSQ2gI`UNzcmX&tR_DdqnCk4~r&rHkNWXUT?XoHs)>ICI{llaE-Gh0yv~j}1dEL}0 z$!xRb+_F_tmGjl~>Z?(tb@Axv_e2uFH^xv>OFZe5P+A*eIiJi&qp2=TUQTDzQRGWq zRbP&e@9sv=o;_=}TD35g5gC%8l+MZ;dd920{_Wj|b+NM@|L_lgJDChd--5l}2bDua zBt;&f)Riu?(iwMla(OivN~zYXby2Lspiz(OzC)|axBv58Ao`Ja_~b#Yovw>qY4$;; zVLI`X*=?R|9>qIH<>W4RuGE!da?VjzRRrSd@^Utrv9-%Z=0`OP7%Ig}21!x9j-;Hn zY_t&Q6egYa!FTsBPtN?p^jlp>;xuSZHmAS)yT7NjzLho;=ce;HNuP*_1>a4^pT2wn zq{-+O!FD(Lxdn$p1-{AVVHn5#{&F%ErZlXz#Br82*6XS=u}}*5=IUFc)&B7hf8fZ| zs6CYBY&scEgD|Pr>tvl7PsTSl!?@9@Di`^rC>Ss1s3=KN71vt*?&kjf*RraqMxpb; z@FpA27Ss81xopLq**s%*t!7-63n-avf(jvoH*Gr4fi37mM}w z=5DrHl5iXS&9oU$r_&#ve!rT}&c0ou;niq5{Cd$!(xbk!`ao<=U%rRcY895RW0bq4M8-SOh{Livo6oq%9BJWB=`F5 z$!x5`C~dWwwW+iz%IClST4~p6_xiiLon|Zqo}Zi|NIwoG{{o!9~7W*F(Qpx)gV}WY=bwK*KK&eqAxW7Vw>B26X*ZI}+9F?qgVFUh)xuEKViMx`!^byo zUw{AX$HNB??#A<2kk{?{iU(g;Wm)*1H@LdeMVS@z{lkOTFJJ!U`3pY^d);0bhR!)- zOjT)MTWZZ9rBa()TV<&!^aE>blBSQJKKbRpetGluO;oS9eH zd~b7S>&eq6uV1}=`Qo>K`KN!D;JzGxXxxUg@$I-%E@&jtzMtZMA*n2YyJVsvH6fAYbj@+b_%NzDWLADs;b zP+sOn9cd~=TD7JGj7Mm8mo;1NkT}Y4G|#-WFdr% z8_B~*569OxIonSkk0s+zTbmDm_vHT3z2UvHzrK3I5*wXXUKQ8FfvyTE!Bc)^G@u=i zMkk+6o;-VGz#<4{prRV`H&=sJvmPc9d48waqN=KV-xoq4A~P37K}0B# z3D>%8_Sz4B|71R%E*47*)@!#q+gpdtlo%C=qAYTqw|l+w^NZu-k7b!Ln<86Xou0Ke zI?YZ;wvar}i=v2>LUxGYd0rTX>}+YhY_(+P>TxZQzE=+|S}*YZFfa^6$~x8zK`q`( zQWZpgRJ%I6xV;{0Z5H#nWs}5-HHMhEEKBE{rw9OuNJ_~Fz90=|k!7CGNv-LKgmT(h z&P!REHRh5C5fmwul7e9W{$ab@y*$5O%`;Q2R@v%i@GVJ_{{_SOW!61Sh6exu002ov JPDHLkV1oYW$oc>P literal 0 HcmV?d00001 diff --git a/datasets/example_images/house_cat_s_000064.png b/datasets/example_images/house_cat_s_000064.png new file mode 100644 index 0000000000000000000000000000000000000000..cae1fef87b8139cb12bcb7ec8b0f02c719509a8d GIT binary patch literal 2185 zcmV;42zK|0P)4@!k%wzLr@QaQOI|J#&B&1w$9586W?(r8F!M4`0|dy+{LKRdNPNlO$( z@^ZUtcb{{rY96S1t%m}B_2Kv7AO85?H$1txH2A^-<~l#;{{&8)6#7BMvj z*W9_bgE|sacei(MUcX6JuI3MC=NI$E`WG)=9L;tdFeOl<5JJw`%pwsX^zD|ICgWks zzFx061|mWtYUh$@G8&EmAcP>orYg*gP8_{DGRZ9-?v@w=5;;j zx&**g6%A2B2q`51aC9O=&9^r<-+cSJZ4-er!o`QH?Y0&2gK1NT{cWqtyjm~TcUK#n z?;M?Ah|DombxJwsB%v}aK?1W7p}0FD0zg2A(r2{Ox9_iSZUu2PXaZy3ZI|oS?sR-G z+Y3U&5f65D-n}~;5W7Q8T@3&Lv8rOMQc9}YG!1b8000*e0RSYbIcukHFD_r7{>12` zCVukCVb``z6Pu>)`>tItcSggf&%bb{(yl6kcH1qpBe<}Lh!zC^cNYnWgoq9xL=v3OeevZNSJ!iKA2k(u*_J|X?#?Wx*7rR# zBchqHFcT2~n3<~fskGUa>+NJvpBzkw6zbL}o!q^YLWn@r=d#{xC&Q+R#AJj<=m_3*ZM)qt!p?Y9 z*LB?tX1lvl==$)X_)E*B#%W?kD= z98%{|L_K}D{ctzG9UaW#U@)KGx|f6938M>I6pWGG)!h(1m*Vc|PG*LPszp_A=GRdu zmwdn6WOY^O+FsS7;Al`w2=kl!^S9@-{ez^Yj>E~$dc6VnDh33wkE8C61m^DU;%-7* ziuRT_ZM(BGLie_72?GG8QrgsM*={yrG=B1{7r#DwdR&uy_0`vX`}sHDzH+_`Ari3x zkvWl;lHDf&)zR^@|M<`Usz$qMx%vF_ zFOzmRi&Z?mrn**hbpRj$1fWM)0RRBV-HC{Z0Bkyx!%1Z82o(Wy)@%kLJ~?^8^Y&CMK%5C8xK!4WZ3 zk(5VOsNA0&?1d1oZdPp(?{n@`TVU_$GsSpuaan<7!T0l<{zlh*n#}e$+bkv)C|kf% z9Nb-r&<&YMTnmUe7&1?H4~EurMdQ7x`rG?HW0&g?n#pJ|nYLZJ|L2=gEwkx#v0BdO z^PR)P&AMX_WR3_J0^|+}h@q4W5SUpW#RdTC{M&zd`RTzpTi>OAmFdT;S4~}2b=3?8 zJJYGVYbouvT@ZC$cW`*Pzdyao*O6dTw73JH14sxV5EB*koU-VnYE%_CO!xO+d@2=f z*X!9&SHt0t#2P}(B{|s9(c#|Sba8jHSUgmN=IQbAAo|@!E~x_$b9e9%g5_kUj0g^9 zCPegzlq%dUH!a{mV&Rw_>nc`N1Pao6<`TgbM z{mnw15E0!0u$1EN4&d%g1PFvkF;+wph&5$&C@F7No2qWqY`tEo`F6ee;q~j=>#M%q zAh?#|=8MJRe_wq!UvwDi5JMFMfajc5Rak;Zj1d4N7Z4$4cEMil$J6)wvq|aOpB*3k zINR%z?G8r2dVaD$tS`^L)$R4c@rw|LZQFnU-9MKPo2-an?tnxLCIpUzNX$`P6X*O!`t2|$cuJ7mb+w1EaFeD^( z07oxMXu_NU0MOJ8JeTtE$&VQ_bI$$c<($BuJej@x`gbqC{Z|}P*?A1ht>M|&!3(=ogEz= z?C&4U4xf??{`U9pZWj;0Y>MWj>PTQ_?m&nDkaGrroKsPEM2aFvB#)%J10o_@Q3JlY zU!~N4I6n^zz#>wo?&{%TSqZ72bdH3)VdfDr%yiI9k#bJkh_AR!150R;mvBclBD^hF@m!C+LE 
zbsPl9_5AuOj)P)2+P9{)<^9=6NFo73A_E8pW3 zGmeL%_o)9M%0(dLS@|t{6Oux9K(ck9ts!J2Kmr6N0D%aem>GZ(fe4U+8HflG0FV(3 zdt|4;OBn)5hh!sCf?ckHJHx3Y)P~tvA~Mzok^*+f0ZD~Gh#mzqd+;6+5I_(z10w?w zA~Jv=LPm*R#!>`C2;h;W5YyhM7pL0!d9_oxLrMGAB2W^Bac@LIu2u^~FQp*BAEY1y zknjMG*&{OoA__zTA|#n8iUc8eKtK@0QLI9#AcJ5q@Y}Xp?~V4}hya~Owr$rXl9dbu zfh0m;0AM6$U|{drdyn`-9wua&C`^<@_5kQSvttzKyi1batM`j9&rZLeUs!<7389+N zurI?*ZgY-KH)Z zkDk#X2@<`2-?ahjdN`R#L4g#4P!Kv>H)Y92O8~*%F&hS8mIN~Hr-F+tFA~`{x19(8 z1MeN)+^)}-H(CiP!$?YPj5pnSv5Z5Nh2h;-PJ%!Y5io1(wS%yzOm*w_x1ehQ7=R>Z zbXFFxKf*RR7)5X0C$rRo;+y5}>9gx75E99FMuFaY1c;+33_~GENWnmZ;eikm0E{zr z+n7Y!JZkq@Q&+wzLtRS-0ZR6Z8ALejyGdU5iy%zXKYh7D*NpQ1u&n0I7EoC008t2$ z#Bsmq>9%?C&H27{X`04SEEHMq!X$~aJj#oxZFRMQa=~3go@JKAEr8+DddJu~SAo-L zF$?l>zhC;DVe}}Sb6V^9Vs5nQ7lUeFU0mLzY1(xy5|Y9sj$S!AnvBOzxBE2hMk801 zZnKseg%er>x11QN1zHdh`%pRP$FWpF(-;IIA^`Btby~+sJe^I;stUt!FdQ~*O@vjo z|N5KrG|p$UgIKrPd6PFfAJMoNjk35|t$6PvKxs624_>p`6GD_0%;>%M-XmgB^j~}J z;dC}D`u(C`7-OunhydV6qmh)s??3v(^KZTh${L#mu z#5W$?@8zjAMgr^JXz-Kw-ftE+pMAR?1@*TtHx=;kXxNMM^|P~m-FR=4Bsn~sB}t;S z?z(Qf-3lS*^Z9zc?)7p824H572%F8um@bW@UY;YO^a$BxyjjV*b`rDG#rfH~izbr? zKY24B9@r%N?T7!r*>zEzAd1~?x34PaIf|n7dc9mOl~Q@0#ZlrM@3!S?b!&`KGKk{@ z0May->-DZ$Ez7FH&K!2UUXeM zpI;L}ua^}?p;TCw`}urX?n;tmtXpliO37W_5DLnoWHz20AD>KSvowsyqme^)0#y_R zQoi%x!9TwJcDFAVi@C9`DEdb0`Ft*f7z_rp*({1et@UcTTCdkl+XCRx(b3WIky478 z<-x%?%kwY@5fPlbdiG~)O_s%t*1arSEv`QQ@S}R)6mc|P&ON(9ueaHjLI@&C(^M)Y zgzVbdTH`(EdEOr+LQ3yE5m{@cP>P)orHa#}+H9-qrDu2N;2|M_cIT&GW4#Z&C-0oI z^ZCMd+JgsRM83UUTceLpPU19;l3Iuj5hO{R#BmU)e!p*wmRVlZ`~7mYjBn-o@=9pE z*Uhfp52n-2Vm+809^F0u{$~07lc$-Nb=^2di_lmX00szgoL5ydUoTfT3*9tC?8mwH z9tjChp^!-&OC_puJ6|s6ie_4BXy`*J#);O)y7 z)#deHzWD0w^t9Yp`N5=jbf`v!O5(|2Jm?juIMUVT@@X;Aqr=!b#~6T8fXFD=GobLE zQ;|IX^zw@@*6;n|mtn8>$tNHG?t>3Leey)kFV3I**Kc-b=iVAXHnn~6-Gw-rP9Hv4 zFRSrzBojGwetWqMGwEFH4U%9M9*_{39C)yV&RRsFvTQ)?ibDS4U*7+R_uu>M=~uGb z@53~jp4_Rf=f3N7RfeG|qGUQ4FLuq@<&ACHv2(F4Puzf=WoGmQ)-y5^k_BLP!ndU5 zLH1yHG&me1A|v&B#XtS*eR=%)8+Y!$y88a&vk(5yRn5U<^w#SSgYkITc3~PewiA_( zEg$v!gD64m&;z3dKn6epWJ1phh4d<{p|#9bKqP+{GSfBk#pN{$q%5Ak_tQx+%xB|W zQ@6(S^Q2oWvY38+_eaIP*b<3R%=A|Yr`j@WzCIir2q>T+F^J1AzU0;RQ80h-Qs zO)Xc;+aOd)5)~&$6vumTOi120O_PSfuz$=dT6N8~^YMiqcTrPsoF;39vyvf-lc7iT z#p$0{&HnHI`KR&yX{pz?HUfbWoOhDhu}5pV2CQ{DNzx=v43HGE)@{|bo4O91H=?+? z)>o%5&o3@$FuC{Uqi8hHN=BnWmZuNzOkX4HjRyOs@}7y&GI-#oYySpCxKfc=t1fl` O0000Yql_4*p`&g@Ljba!3O$HR;_4x%1fRb6$ybH4MPQXT%;&wm4enTcreT?}Ss zW=&}^BO(Aqq^ux-Kp=rIn3hH$QX&NdWn3)yIL`2KeAOe&K z&z2x!0x>gia%$mhu}hXkRh3zmfuDF-xRvE(vBwKM0)VD%taZ*hJmU_K0)R3rF+)K_ zcEM|L1^_Xi0RG>>v;d?OgSD0z&!`C&Qz)SP48-Du&Vt&K>2rYpGXN&U7^~?qpvDY{ zKv*IG2>_82l!b}dDk(?_5D*iwFoB=P=Wn`EQXvvfh)oJkQ!Y!<2m}TIFo8vQAu=z+ zU!;eZNjeEvLHu=bEz{3rjOq7!Hp@~BZ0$k<0*FYSlM#Sr5bIJWW`4d>fkk$o^Y<*X zF~z#63u7~votQjtdlHQ$U1onN@;9=L=WoE*1xPfjW`|D?^G<24UHm^WKUppb36T&n zSQROyloA1|NUHeeSSXQFQpIF4+27xfG3p6PEPbB@w*at-nFtvo!~_vACxDTVg4i5b z>I-Hd8dAd$qO#kqjwimY1&Nu!tXNhoFDi6#9|7wTE!s|;?caO)RU5m#a_jWA<3zrJ z7$QqZBus2bL*{f=&F|fR6hg}L{4XE=d3Wc`&dv+PaMfmACVqC`2~YxVUKfL?z&$ODQ`8VLJA~(62_-WNnrIDwj zXS_pWn^0Ebrv9o~4xxy__7=TDNz&e=vRWm17uI){LZjn zgm(V+Ti+{-Y;^Q!eYJS|2X7va9^Jh0@uwesw0Gl^y-#jDynQ=XV;h_C!vi~-RBOXB zYN+bk0QK~!V5gzJcT}0IySBCgM&0=6{o&(BKD3?8G18@%zV*|e{CHZ``}<#BzyAK- z?fq`vdF}PrZ{7Nj*zDZSg>z?jtl`ki?1STmGUuR&N5?N+*j>r|O6f+^e00=wolDV= zM$OZ4P3`QRx30Er!%P>yb>V0iel-D~FFh+R2->eRgUZFFUS?~DD7 zwc+aOnzaaz4))iE#c)_OZRJ_kH!qB5`!U3#=uIY*rfDaK$J^_}^Se7+TU#-B!+dyj z*eg42NJPYJ=d;=;%=hnV3P*>B1igEF*v-t<*I(h?*S^2CxmFe~X#DEY{TwN`T_1Hi zomgwzwk4t%Lcg26`|gjncVBq;@ZgOrmk;jVIX*h7yjSE#%4^sDaR1?hc@yipHIkKO zv9UHNyYBRN=VK)C5V_1(RlR$*kBEFcogEJPv8n5CHra7q4WvmQDuX00000 LNkvXXu0mjfGve*! 
literal 0 HcmV?d00001 diff --git a/datasets/example_images/ladder_truck_s_001799.png b/datasets/example_images/ladder_truck_s_001799.png new file mode 100644 index 0000000000000000000000000000000000000000..58a0c840101f2f1acbdd5b0a98c134b634c59f96 GIT binary patch literal 2748 zcmV;t3PbgYP)EImT z;2ik-FPs-jRYw#@FMhsvaQM{Jr|;gMFCLaA$DazvBQ6MX1Gx~ABvDjVRg@%v5d@HO z1~|tMK>!#B9B>37gb)D0IS0Q2LWqY4hP3S9ou$QOcW2MgAQI)CL}xsbQZpS~0G-K> zX1n#^;iKMcFJmm3Olo>aKuFia2tgoVj4|L0f&X9q3AnSP`^@RHa;hT}OGpSlezGYu zIDLF#eRCr-*qiI=2b_){8xDe?ZCMeFFe4=I|as+@C(&?hJU zTL1tslGV3=@>9kcnVpLn2@XK^Sg!6g8h$e?@XA46RTL=}Qe{PzRRCBbmSBuEn@vFw zupn%1ZUV+z4RdgK*oYXKrg6??S@wM&LZ~PTy!jt*P}j{SQ)^e>wbH5H&waKplL^Vt zM#09$ReY9zUF+@Z^IZ4bxwD25nV+A( zaN&X^Ns=V>_V(h=q2u%SKdRnXUU}!8(DYR1*|RtxdvE@{xwBo(#Lv9^QUKB3#zwJN z%x1HeAXdwDO$+(HF9?DtU=axu6BE%?^7hS}x*oP|yHqNQ7Qc1v`jspH zdd76J0+%F|ibQy+w)5VNpl}dOPlhOD;qcJV@YvW1UDvI4yHu({04QUEAjk-3yE6kL z!ynDv`_gZ|nC;0ru5<3(IoI>{_Vx_JfDq!*@v$%3&#O1?z+yqER_*(9?Lx_=_1b{m zlZh?=_)6M{3-J&|;0RGo(e-%BRpIvP+R*68=KjIq`d;62XF7V);}a7hK|;3F*Vk9x z-*sF|kB2#D_)A}SemtT4pKJa-?<7H3+Bg}JWj zai?Df@y>(wB~Z*;|G6WuR>k+@Rr|!mgw^tTb3K~tZXNEs&|}QUoKuep)4%ieLPuOh zVPJ$p5>7u^b7$`Ko<7}OF7DsBHTrwsX?uaSv@o7Kkt$Vx`TpD0zBrc?+}zd_x%&Pr zC2aAd#fK4v#?w!I{)-@;h9sm=8AAaBhB9L(J13{?T2+fA+hLhe(B^)~Yy=}?(UJbf z(o+A$XKV!hL}=of&$x>V$_Lk1_7A7y$JE{&oIGL8Eimp8PE6-eE;Z(UaW(q<3lIoi z(JU;lVF0*|#REyIGT#SYk+299c)OiIMl3TDZyfq9&_!g+P|66`P~b3P+O4Y1e8ubU z>J+l^>W|(Mf)=5oBm*5)i+R^SKH`QI)#dlz`2}W#c@PyT1(M=H&LI#0x0&A+4@VOQ zHTQie0wRl0t(#CH3ZS6v+F$$BuVs65cvuCL_mr#{aGMCaUJwo%RBSC>d!uN`UbIV! zc4Nv2!U6$65Opl-n$Hag%f2rq*mNOC2G~5h1W0=d|8*T+6iIy|X+!tK;y% z3%?=wKGP^r1x^X|0k&Ix54Q%e(vr&?>JIjN`>0Y8Q=MDYO8N21*|CZKsD|pTVC6nl z4coTBUcUO;-zrcHKbWiEyLVJ-8USTFdmG0Rm4gGzYbOjfAhujLRVX$xea=W<41^+T zxF#Z#b8Okx``6y?I5o}7CH3fNP=MI1_#b|_F?*kCI&ry-(e^)H#!z6OxsO+m1c)SF zNu@U9=?=#e-N5EV5|KfeSXfUJ`xZOrii1Ka?7np5y@~ zb*o;egAgF5jm5)a*W$e42(lDW+D^+c(^%oWom4|U;6;@Rm?ylNVvEG%I%Hm3qIOVR zStzS9{P^*S&pl&{(osM|9zr8I5DUNgN3WI-D~2kxRgsLIKv+?Okv=_>>pwN=Q`$W+ zKth@zXd+b@;UeeQ3b2b?A}A8CWl~}Z2GnCn-QL-ZYML!63lXCz_13zM^W#qy_p9#q zLC;V}!nG$_Jl&tgM=5nHWkmNq|Dd?53YBgHI!+K!A8P+Pxv zM4#*yqmj`I&q69K%+O2l^#*O_)4;>*0YT}dpK8wWH>$W3{ zt^i{_UD(>IY(01^)}0EiNL8z8ltQJt=m!4L5%5!0 zXWo-rw(EHq2DP=d*5$XjRd#%L&*+v#e_HLSn{{6bC#$Gju(12__TE2T$72#YkKzjw zl?;$Ht-2;kO2{Q_YVuRs;ci3*W#H8!G8NRkgvPpZJ%+(;mVD*6ssbMzKaFb=tB7#$ z=_#28y+*R&S4J(X@Y;{ux*1_|Q*s6%7&~lLdXg<3lr^Iei}YVSe|>&wd~B>Cd(LX-8-{0?>v}kiu`d<9~>D88{tfM1^|GM4G)R3==;I^ z59aUQoCOGLD*ow@FN?X%&o5v3{?FdPK0^ETKv9trI(OnTC(mC@WOD-GB7v7KUGj*p zs;Vr@8FkU!9V%aKO>AV${6F6 zbH*9t|1*wr&KTpI2b{M!XFmPJ2kcjHpR6n_-M=;KwOx}jlM~OjUjFts;P@aoIsWQj z|IOC+4xt|R87G`^9`OHD&pBtD`81%^W0VK~oBS950t{zPNK}#l0000xTfGme7FHsZ_c}>0|&w0*=L<)+8f{-EwDau2n zL=g!PY-6w!#>U9w8Mpg3-KWp3PF3xlhXE<;1J>GGOKU9*A^`va6ab0<0sz9d5C9K= zAS41Hh$O5=TfuN$>zDBFroz03sq9fl&Yu0TEb0 z5a{G~L1Ew8*?6>gbQBXWW)qY2$;5={Iv1S({N{~2_wJm1;lgWCSwzXAV z-MM>b^k}qycmLlX|MSj&ZvEtEZ=bnvkwai+W)?v}009jE^M|7k-h1!EKYcix&GIzO zH0Sz?Rx)`sT1{v3@rcB6gv-g%nX{J=SZP>P%fowj?jIes*45?m*7Z+DM-P7b);omg zy%%N{L=h3u0P^`KpZ@jFf0<7v2Ibt-PxLh`j_w0`+pM@)l;gvqm~U@y7)?!a+|-T% zpleU9_ix@>JS>{bz+yJ>-u?b}zy0x#uBHhI05hX7Gizb_@2%U*`LP7n8h18_#q5#V z+ldS=7V~O8rC^uysqdOJjVFiK>s8AHX}_0e@%HxCH^&F;tp=Z_>3DVY`HdTwFTW_v z!XU&VqMLetczCyK8)q?(;;Je#v+Iia?ftK>efpV!&Sd>%Y3l~M>d|yEx6Z|hF^Ms0 zVl-uj>N3t9vyaa25TJLpTGaO{2!-J3h{*Q9i zAT*9v(?zq^>zDIM)mX=zr6x&@iF79z5mW-5m(Y45v3~L3@aV$x&q@$fA|m>$o432x z2a(n{40#k`+g6j&QQ6j|bsA}26%VJ2!|^mR6hwDUO5gUGH;g7CL?j|+7Uz(dLKuxk z&pov(cwzzTd;52n%hi0b2tt4k!J)Y6WYRHD%Sth=nzrh^;}A=XO4l8;w}Dieveqmj 
zS}Q^H-jfogs4Pq8z0z8kMMOWdcN$c@cm4&X&FJ121feVn@5A_bIV~*)3W2?VD6#_V zToZ!r*d4F3L=Sr@B8mVKim>-VDdrGFB!qw@%&dR@i(dvo1gML}Z+`tN;ks+OdtV#-^jnxy7pKWvKu#2VE0veN8rA9?a5jf=51s8ypRa3X_+teZgfcM@xe*E#r0f0{& zVAc%;08$(l%j2?G*(~l_A3T}3??tmBAdCppIC37XtwW53MWl$qRdvI{2!MzJ5XW&8 z>A~7SSOf%41Ptreu>hBgc~_KGf3WOJFwvYT>u1NP-$d#a@Tep2Pv(K zmC^zULE6i9&z&3eqEsvIJ&+;}D6Eawo*6V2^9ecMT7Q2!bHLmEc!)vAdRn%}9vpy( zYdxa1gG5EZMa#8yI#DDXq4cx(op;_IAKY!KrDp&{2@Jyeq_|OJF`uSs3>@lWKFp)F zM8)Z#Wq(|^S)LWGTP!L~94qhvNSQd6&O?xZidZ2S%|zkbF#uskWMR=F48k0oZ|kki zVF(gwy%rC9#%QFYS;-t)?;g#Ubz>71l40%vNvTAUNGB+$iHzRddCcUwKdEa00Y+pN zCICbbb{$FBSReGVXqd&DgDgu-RV_PbgM^~28y7kxA!R*R)-~2^t+F)Inn;m}qt~xo zxp?W44*>+^1OUQ9hycPII$)P4dS`nG?1S@VQ_qTGQPy?axxfOzA`IXHI8m(&uCrOB zV@(izp5^DxU5KLOyU7#)0RRypW=4?ESp}S@dUK<12-i0@ZNTxWaTKYnXM;zA$RIO! zfxS@YxwBnFm?#wyB}!MTvRqZn%)%`E-4ovlbZy;ME2Ds&^_>OOhqGeVgjv}xs+NIR z02IZ7`Y4qJHneXuc}Tev&4+Q78tu1?@>$?-3*biil@&pcQg#V32S8;MF%?xOCy` zFFz7c%p4Gi6p+&Q_U{dV70oRWCs#G8%~5)?3$s2qF>! zGZUajon`qG&zyPf$`vxI^Nzh|0YoJN%vL8#74yZsnA~qBFQ0!*2bafbM6k87E`l6D zDeW9PX8}Pe)QAB|A!sFpki~i48$N&jd#BHzKklZ34cF;qoaR+iCwVXlXmIYqH}|qw z#Zi{8ZQ8{{4z8cYmSJ8sb=@GT&UVfPrH#=_f)J8Ih$8@zB+b{icDHt)1QRL7jZ@0T zt~a3LqFdh_po%m~SRbwlFd0~OlxM@q;n85w8y(MzW#IyAtwe+f1y~UpKrf&W2!M$C z!_BSTr}oZXR3^47yO?$D0;;lYo$737;#M<+o!y=Ja`~_8*N(;yUVrg9)JYuKwywI? zGHEAX1era8L~)EBfPuXuRJ~qruzl*{0qA}s{W(lU3- zT-!gWtJPaqUj-(kl_K;kkHEy{GcyqsfxAw%$c3#%L6AAjljM384+s%hyG)Z8V5=|fmP?}7PX#NKUXf@QLv|mC10000b{saGozrnx)`>pliqwt6!IY=&N&ekhkoo3($4U*Ec`}CRG&)!e$MIiAYsV5CR7TCM7kh zstQ#QEJFb>bOoJ=2&)pnYi<-EkTQWt*ie+FP1AS?5gS!)JSZ3qaO&eonOR6*A-4=t zQd$9iRWbdYmbDVJ&W-Ly+eXpN%?&`RVmyw%KkT)Pn@yLMAHMx#7smjTLRG=U%!a|Q zC}AQZVzNqB$q*YuY>0^oDjq8BEQfQ(#A>*eq>+u2jn>K2CuKQq@83<61QCNsX`RIq z>52=GtON!DOaw4Ms;bHa!%D2(PX6YdZd0cxSL2r#S4C0Gv>I%0?)G~9_ue~i4}x#N z1XfzptXAQblmMl*4g`i822>pbmbsPZtt@{2^|QC{zkhIV3&3JATeg#?o>kJ&bQDiY zu3VEjsDY;-4ywciC0j1bRYM6ND&Bh{Vw13HYNV6JNuCuMClP-1)*kzC^5Qg#Y>^i4 zJ{nw%M)7=6US8$x)_guM?JXREVu2$-6(IyW8c(t6TY>^U1XX2fNJ7gtvOrzNu~(qn)>kE<~B|{Ke51U;QsFgTiiab$P#^7VQU--RhUTS8LL~i+eHvwmUW(|t+1b9hxhZFJ z=X@N;gTbJkw*%lwDM(#6E;Pg%oKqF?wO2rxRc+HWs}Wd@8w*)7;@qNYitT=qx0myh zNTbNK+xc`dolGXv=`@5uY+TSNj;*yI-|rMfD=+eFGS|PIepQJuM@H0!AS>nt0##NB zQ5Km;j~;#a^AEb6!q-)?(eC%Vd7dtp&G>fu(MKPpY1#x&;$z5QYpWI0ptpgbzNwKC z8xWB)h}qV4MX!Hv5LSsJ9&Qcq-`z_L##a|gn&oj>&c@^GD9Wy{uSFzH)4g72x8HA( zB~VpP+;~=b>Gu1*ZhL!w=W0A@D#spwrwBv@wJceZpT7KIyB&Qu8)7LUiH zgM&v84i1LHVW-oXjV_j#mwxHS<6DZO&Ea5gcR%a(KK|m%^U>8}QKg*3Hnz^Wbr%`K z;?qcf@!khJy^bZ6v)N>F8+?eZ?f3fD*#6ew-o1O4%_o2SFDfiZ)bPd>j$ODeDU(|*oGjg zqN)VZ$n5OjK{LO(9iLrZo}Qj2NwV4NGn*tyj!!P{-+yy&ILwkHPqKKsKiC^=4E9Y@ zJpbXvr=NWm#67%!SCz)o<+JDCe);U1@$H1(`G;Stt~MoE=te8v-Rj-lA3~*RY7zC$ z8Dj^7K~WTgLBHSaY;}9Xt?hgxOFL~!+MhlB^2w7w?{04$Ja|L=^7&s6|Led1=iBdp zs9a#M^**dRNC6SKT+B|6kH3Cy_jU&DqKK@`viRQKu)p1F<$0VXHchfl(e8E9jg3iF z{pqvEhcAylc>mq~;o$7z?C~dm_~g^iUmjgK(EvlL)_b>JUm_}jpk$(|Y3dNVah$hX zZ`|1_ihQuUH5d$v&SswHmdvP}T^yYqo}6W|{ik1jm>d55$*2GM@yB00`+7EST%!R7 zhM{U9K~+>#yoat&P)rxHpR~H&&B1Wk?|1LNbr41IxNc64FUB{c$>_FI6mLI#xHsJX z_M2z_@$dik^y#0+^Q8w{8>zw)2UQ7TeNzJnA*c^4D54sKd>|jF(;ITqnY-xO_eYb- z^xH3=&PJod7eAbzpQ~z7Y>a2Kx~iKX zwS)6mxfxw)AR-|Ih!AMC-6*)*>*aJl{ra!p{p=_2>~{Y-rMkSnI=s2Ly12SHJ-@xa zHYQpumStJa=kxh;Svg-d-UVqq1)-JmA|mSD`-!TG2t-#J0Pta1&8J{ai}r43W4GTa zI(e2wY9bIvY&JJKA~c@N%5pZJ&*#gk^5PXrrg0L&T1@~pgz(w~5s?B=1sOz4Xkt?> zYPy_IC7LYLDDiP*T4^>b7fs`5vsqPD&N(kqJLiH1r4XV z7-NF>6&#k87#q8gR%WA}$+*Y_W}ipB8ZRzM5;=}Rz+3@MgW1T zcoCwQ~`QL5kVm9 z@?9fMO)4Vdy%%Nznx-L8W+MPqRh4DAtd><(OCVjjL}(BR;)s}D37|^<2QmvTdU*_4 Q;Q#;t07*qoM6N<$g40AM%m4rY literal 0 HcmV?d00001 diff --git 
a/datasets/example_images/lipizzan_s_001223.png b/datasets/example_images/lipizzan_s_001223.png new file mode 100644 index 0000000000000000000000000000000000000000..09a79f7b11524a63c953e1377f554e795cb69300 GIT binary patch literal 2360 zcmV-83CH${P)w9<}@;6_9g%N}#F^0OTuU?&hy1(HDGU zx~6G_5T~{uT%U-%zPh3zetg)fvpJPgYtCXk9$RA!=e#V-qN*@NEu>P4P^uGPQXHA> z#+RzjGBH2Aw9dJdsMh%~48t%erRMY5z8z0P7w46uw9<(W4uqSuOZLHoYzyN))EbuPF`6a_)|`Q`bN^RC}# z`6WKa>_or^0 zxKwv{A1U?p{`Om~ z^Z9%>P;RYLN@=b8aVm=0>p%U=``!MGXpJEkBCGQ((pB_74%a^n*j-mL9eRv z{PM;-6P&Z-G_UJCqr2V1tSR|V|NQ4M#uzaKmt`J+gb*C0EYlZPHzs8&EAM~!T{}(| zBKNR3j_HWu1tnR2c-cN|A2P|SJWC0ix;dLK&M$LLDFeS!Tq(tm`z<0;N&$!vQtQk) zS2xv9e)jXLo7dmHf4=?f?VtSU^Z)+lUE99EU~~X7w&BjCZByr*USBW@#thD5I}Kb( zoofJa5h9bv(Q<3|N+}_Qw=fQ78aqTJgi=}k!$1D>cJp$3d(%|e`uyV4=6x0r5v^-k zi+1n-Gewokg7>TQ>)Gs;5OcN9LC6K=tFCUo-4tEhZtgth=EW-hOT}0 z?q83GJq0w==&X6(?J_NsccV}L`TfTi8)aUPee^EBee-9RSC^L;7iJospC0XK*m?5K zn!Xn!$};0kr76K_oTgzKUOMMvp7op;MWGkVX1+MPSgje?MN<(BWicy%^Y7{Dgdx4Y z{`~8&|N7T|^{b{Sl@dN!JDH&!yVH&m|IKgy?fBh+6V8iVVGPc=lu{;jcicyNoG@awPR;!6Z~yM^fAQ5Xl@=K7>3EpT#E6v40cb(~nsVpi4n2htzA!1$@l)!X4Ntws!%eu(5 z229R-7c3xxi+#Iqi#%auH1AIRG)#nMr|v*W0B5{m0Q7V^@emLaIh{_{`)Qi`zL!D} zN~@~4zP=_*m1R-awd8afrx3hQoO2d~_rWCaO37&&9=BWPY%^bZ@B96y{qtsZeicGE z98cUBlTuo(R>oP`D*&kL+Ieq{wVYQ?k!4vX#WW7>sV9_ucKfQRblnsx;{<}A-2LX$ z!`%;yvolW9X0z!J+i`3$&Lg^_C^+Z*;^N}VFTXr>?T5QNrBt5h0kJ(EVvJ>35dtw- zGmgiGQp-S_{y-P|wcKixfV9=F>?MGc9;^@0Imj7upAAyUfK zYJF^5r4$#Us_W@&5vFl@woI5Fw;M{i6zchTcYJ!hS+9(b?d~~t?Zc;!Pmi0s4?nzp zdrOt)ERFrinchrdv8cPjajg^(*gZX!O}$<%z4KB@m1%aq`tbeto5#)QBWElt8=aB+ zkMFl1-zz1W*<$zU!xIJ|>GERP_xo||>%3NqQ%|cRAN#(U)oC04;y~BUQ~kr6JlTlTt2U zUUp-9EX#^lRX$FWQoJm6UKGwb?>!-jk|67xF$zF+mX(XeFbwOn`tqy4D9aoXgAbG? zL45F7DxtML1n!KpF$tE+vcQxe1x`|soB#l4KvSS8aDpU7hS7VU2%%g7FrDfB^G49X z38rWuK|=4Wjg(TtLIfn4g^~h;=o2I*cuFay#K-QCfGDMcu~T~h0LEAh9-{*QN(q-q z${aE7caP`>Ca5=InkLF9BgA_jV`7vOZ$wsdnu513Okh2xgc!Lo119MEqhNwlno@EA z*4hL~YmJzK^LbHu=ZEgd(M?100T59s0h~m@2n3N-Sq3b|Fp@CZH~_&IF=FJDP>c}+ zQceH}5J(XdVwNdND5eC2JipwBsO%JDB$5)&QlbnJQeco0V~lYTqwBk#q6LzC0>v4j zgahZ!`WT}SBKTmf2_dAE7$qcOmNS)USyoTGdvB=}VlYgIm{P!q<7joJI2S%5LX5$I zpB8mF4r4nF!A%J!!S5&WAesKQV0$JUDsJ_3#~ZkAp{{rmT4xW3z0E7pPwDN zDZ1{kGr3rAA9v36&1@!hE~o@51ec5D`SEn{&WuCbw%s@mtK~c(D6V)}lp(~F5*3-$ z8P}B$-o}(AU_wzYwvU@rdy+(gPsZb2XXebDbNW)n!#GHW!1vHk-St)1SKakt9XtRK5dabZ!E313>)CXsDu{##00;np zh=2f)W!YeBK=dDnUV|~l0005Pe?=Yt!iKH2Aw>~k=KsZE5MIUpPP8g7XVc2J0l`^UtyauT zgsPh7d9T+2jalYYb(7t95F}9mMWoFt0HFFxO2T#9Ue0Ehv)R>hSr!GeEsAoZYTJ53 z$w|iJ?OV4_dcE#<-++x#1wj$ z_$yHDJJ;vhMY;nDH2BmsO?E>$tV|J`qy*&3T9-G1}U zXXoc<55EMVR&|wR9>jHrs$i_MXjl`oDv-;_^-NN6r8z#@&-&d@KL6}+cjxH%SV1>k z004?$9{=+nNJNB^q`J6RESt&s1&~R3=ADgMkMq(J5nBc>ohfn~MQa~K$uM$>HlB6n0T}t2_)6m)7T`JBx#DFZK7A#K{jj{i8VplE&Kh>x^9=v z6o`;ORIGD8hRKVIj^&U7GF4Sot*Xi8<;M>m7)he436i9=Sj-!rd}xG>Pnehtq_QaT z!4Qx+L9fUpwuXUGMFXkH$rfy1oL@Zt;?s=bFwti&mVsA@X6CBPoF(8NmUg@ zL=|qP*AdB~(G)?6Q2S**zI~hx`d3#s&9auz#89)#2HkwVtc*aH^5x_zrRH*S2^v58 z^G7Eq$MePF$=TWL=BBPI1L8pyL{VkktP*F=Fr@?-0r<|r=II9A92gIK zh5%BYR(U;S(WhrRkQ?(39>y?j6*7=6BG8^rU_eQWB zRpv%0irS!{umVDamdVH1LDBT%P{G3U;r5-*V6+aAn3JUO?s#_j^76?yD(!4t z&Cf14>la&l4zj)e{^8D%$+D+kpP8?#X0=*bYn`*Rn7TQw46LfvI{BU*6y0Hecn~ou zx!9(*ZIw;qJu%M~^V$cS54(jOjMC-RO%l)g0>ICwFGZf(EPMHUVkUJP1<*&1(k4~k zh9v8@Zld>Y?X9G;%uMT8{c+ZGU9=4|E^pjs@4gW=CU_^{g< zTwY!-mx~lzkqC%L2mmUIjF?fk9PAHw{PKEtJMWi8)5NAti>0sDq39Ooz58#sA$Z?L z3Cxz*g_Nq*s#}zKmVfp5QB|!8H-Cw*s&x`&=9H3%l%0~DnaISC7Vo`t_w9SPc1C60 zu3o&H0NBZ7X7j@Q{3k#5!3(Mq5NCBQ)9G|w*CB+uZoa;_T({mZBT?|7TGgcJoDCt0 zO73z7bk3~T)v8@bR^ z=GFCNx~^OO7NCkMglZW<5)$Tlc6@xCWm$}&u2-wo+8AEUr=-EzqxEVbl0;;=ToRGB 
z7C?=$rfI_DdN!R^O;lASB19x2M5q??wy9NgIiCT5-Psuqdu&Wo*CJ6QNQ};6r|6uV z93AYBV-jYwiLR=u$=oc%u&=-n;h?_s9Ki5uzHG8)Jw_L{yV`dU4Z)poob9kTB`L$EmfpEQ|B=XDKECRh1Cp zVi7h4bk1E}O^il!IC}So?`#jdzFFJ63jiSmRo&PbV~d_}E08354XUal3V^EMT(+~b zQ!T3)BY=vC01z{PfGQK^ncE%h-MjZaND+Jk1geZi6d{U;swx{}$w!qCHAIOji3klL zFaeW}cJ>Hu>)RNjNJi2ZdljM*?DSSw=EJbht+X3v2k|6(E_VFS>;4Hi{ys*45EC{hI zIU+5E4^b3H9L{iNx~CsiUGKVe@BOkMjO6?SzjMyw9QN@EX{`Ym0Kxwg5okgJvbZUW zgn@DGfB_N!V+@Fh2mpwPfPjz;2LNCOKm!;928bbA!eR!QVQ&CL1^^h>z_>sG#Ehn< zc>2YszGL6I`@Y}l0ucZK17ZXO!2ZesO+dg517pA-Fe2ax!r@ETl^0c3RKRto6E>NSi}Kak zNg7{qs{1_)7+`uHwZi( z)u12Q9#pa*%}AOSSW-8OO|~h@*B7&@XePr!KXMIIU^_-w21$V|tHNl0@qN8~39RVW zvEO9V^HVW@EtqM{5&X6mhiU^|Pj`kid6b zMu5f)`%zNx`DL-G>bmBgppR&!n^I(T3Ba`0!V;&8#jC4@MkHnkhyiaJIUa3|deN(s z??d1FyAR%7E|*FX=Ug?it!rC|BF$D;SN%b6ofMz{>FYupV*&<7a71JRIWmUA5|(Y( zbz9UWa0KEA!~~Ufv0kk|{K0)$Y?jNbb&}LoRhNxvTVkwMuuAeO&(IQ%?-f;@g02u!OFCfkSmI}e|qBeOUd#S8(vnlF=h6M7z6{P_69=3F3}rk0g> zF-tFJ%dYR7&sUynDJgA4)ks2R0RThN42>~w7E!lqYuNplkA50?96K>CLtz5B_3efimgr@(Dy*qalQvP$AC-((18XLmH+>fgO{^p~fv{_yZwuiqUG z`*|UoMqk?=U)vwvJ=)4@8LzWf=U21UMzH_;NJFyhhn*k&@Xmk#<*6it1UBclukVf` zcRKc6ce@jKyVFV3@k7sDOxADToK6RU1BwHz)3R=AkE_;FUf^wyqU#5vqHaW4)`pCf zH6mIZu`Cbwro(>sY+2Y0CWFq-s6Xz8Kl{o1ot~xIR=2vT8YzqIaroZ7Lj-4Ywel>i zTGMDFl_aB(TrM1oo5*3vm<_;ywN+haWm)Fs!B+I%{X?U{aov93Pm)!gML+2+87i9^Rk^U(?P7)DWJQr|4c$9OTcIoJx(YgxWxJdU%M!*IZM68; zU;aAJOOCD=P~eN{V0V9a>~^Qu4sSep_S_3RY8uybH0ipmzyKFq00y3~x#;#H*I|(l zyZ5h)V1o=0xnY_k*xb>^XsyMw*%bq)E*ax%)X-!zKM?(4uu5A^JTIHkXyRDVDngra z&XrQFl)mRFhNFIeXF3^;27}OxTZ;>9TT_)KXUq^|3=x_5;O+z=`0By=58gX;k)>;9 z1$L`Tm$Ugy9Djeo_IP)HS~ox*B1ccS98pQhncx~++ea(t4aec4D$2qVcEg1bSeKF^ zxQ-(z+l2knwW@*L+`hGSc=$tPR@>C8xV)Uj>$EJYW|gMD{p1VO`mLKcZr|7iUAYz; zjwVe~uQus)*s%l$uokld!5&2Ywo&LHT0#>OM>~hZX@2eC)^@J~XsY7M;T_)z!`<+M zcaA>${C~z>$7c1tzj=F^lqa**UMd)!9+~b}m z0%F2*VHEW&&y#ITiZ3tEqu!3=chR!mKDzPUw#&aYdccQXwW4g8U~i*GTmALp0GvZ>#x7vcy_7;ZI7bE>$m62>fz(>hFjxR zQEoJ>T1!ju#lxqE2SdihWISFhvn0#YG?{Mi9PI68@ghk}rP!_OcNX)qZmV9{r*<qfukt(xP8W}4lPPna^I7b+g=L zsFh5!u**ZC9W>L?sN3rZ3-8_9shV0`pE5AADu+k=mWAWUZW;h#JHFj2tCcGBU8D5> Y0_|$SEl$ZrN0Yt^9T2V5xvHgG5zUe2xtqw&SC7CQHD?QX1ZZm(}Psx>e`5FRYWWzKbf zOhi~|-jkXnT6%FDHJU2VTh(CR8@*p|6h1Hp6AO^nN7;fYD-Q}j;{I_ zj?!z*dMm144KBZZzJGk!D{`4iggQIT-P_3|!^62$h%&c#ab9oK4bOF6cykqRZhqyU zCWA$}bdi0!z4Kx++Mis72(O1Vfeb^ksDe{z1hSN`_mBG*3!zk0L6v&t!L_^ngz=I$ zoApP3^-~IHC7vpaTLXm-+xjTWe_*hRy@tJw14F@)gf@Fv04wqOqLv3 zgpdGTV=jByPq~7fcCih?Y|h;v#Na->{XiQWG`r1}Eg)DaP6!1AU=haHT1hC5o9#dU z;g8Nv4#jCtJ%9QB#ug@+3ken*1Gx}0B?@C$#s&x}~vU=SMtJX+sG$zzqE2Tsjv{syRhdIU?<~Un-_D;U|0>Xe3 zTq=bTaw%~b?KqwVSSn4lRz~YW+QBqM{_3rV4=Yul1VIor5yl8CCfHgn1;+>xN;sE` zWTvzL3uTeFH#W9@^n=sUXebJLQ}SX4Wi?WJYT+Kro^h-M#nD_C|v^y53xM zFj7h=t&Bz18U#iwGfxsJ6{Xnf(jr`>SzPaIJbGOF`(pqhRAj*sd zf-thyB8cMH%7vB;kZ+91^F&DD2a%KtK+LAotKl$+s+BMvFQ&CP);yiffmkPSJwshe zEeEBI)dn!ej1X#pVL~t<3z#76Q3nADf>i*mv@Y_c!@RP{xZp)T>z|$U&n{lSxo{kM zz1w`}(R&!faxy7XF&$qy7C9Juj3R`%)R+(pL|Dxjr3k=i8~T2&7M2=E^>whySkRJd zV>O^MU0n1IPftzQhAIkN@I>&7B=a(d24s6{O3=)!M;~V!|-;7%~WD+RSCi zC?UoeY*DM-jH3Fo0PI&RAZ?To8Fivk*kAtYe>_xDjO#V;+D_XK{p;7SyNO&9Y2ue$hXXf?E))tE)f#@p`M( zj$7-F*I3!=w#LY_03om)X#>Jt+rt2!tTr2EsPPt<7L(kBcWBr^Bq7!s5l4+ zp+akn2_qB`&Ws9)=Q}KxSF=&I`ekmLa1VE3*S6XSKEk?)yz7kq{9>#d7 zq_Ri|wJZvS0AqqNS}MNO%43d8nS-gaz?IG{K2G!hK0kRqy)dS;Z=FREM3hnzLg0UdhyX|w%1F$1 zcMj6&Y`I)U#Q&@OdJm#NL_#6->ciG84gid?Z|MLAaB$dQj1>w+O6j+a{<0hp6a_63 z?&j7PCT~s$cj%YouS*FDAo~4wzuT);tD45YYcwJvD20I3^TLC}gMYmF)|D&Q{eXOP z8!DuP0ESx|cjxDq94GOe#t{(#Bn8>;j?SNduRC~HEF`XfdM6z__sswTq|<6$yL!=f z%u=ZYi16*v0|Edj6%PCN)-PWC{Pxvig`7G2Qf)U`I|#oLD=I)#007Wx*8cLRKmPS^ 
zf8O8U3A{c)s2)Ed03skDfC42!3Q$0#=M6SDFRib?bN`FW#X|7H8CFgoegE7_?V!!S z=0qVu#0Ph;-T&gUKyvHWjkTrH4}S6*vJ?WNh(IC$ipUiJ5~?7o)i!TkyYlei`mo=R z#S_Pm=W|X^lkI2EJb(Lk^D6*C5P$)?CE~4n(-H)zbd#8QW3%s6gxF~6>9e0ev+UlA8L(L7nrvL>MK*?}?>EgTh?tDf# zuCq@0);D`pWEvC(vT zdgFr5LmsE2skgO1);_sJB7br9M1JM$-Hkh)+WN7@K_WKLYzQKw-}jgiXEcgwbho#^ zyE%O3nHP^OWU~pE|JObtpUEaF9afN9qcR5%(ncP?E0 zaHqa;dVy;THbenZ76tjqI1)UZOg+mH<+S#T*M3Otg`>S|sn~!60HPv<_Pe$B&%gKb z&tCu0tFJbC?aF+vtW6Tk(4jje#mF_=v%~i2(cW%zb=6p_>Qn_tG$dr^g+U~zZm?U| zlE<||(K5o|gDd}v8`E6Mcnkna$%Ts-vW3#Ge*Ig)drHrXRBiyeRQUv(K)qCwptv#L1X4Sk&rKA8rDGh?a^Tiv# z{7tD;Rtg={TC7wit~(Cgd?n{3ndiB^4sin-ilEW#%|et&5I_=b>I*x@jxAY*Bu`H}j~d;Rr&cyM z#{QVD&TDf8TN14|o|uMKOiP`L%>#cN3R4dXc|V)yl!+$*&UreOQGiM+N)e-pJFE+( zZEW}0p@?VFks3BT9o>e&vt6p~?DdkSQ_N@~q2n3YR5MAa^Qj`>#I;i^PN_&@CW%zy z={FT92q6Hf-99?p`Pd(Q#>_B~JRDMPkWlm^|=;b&7ClZ1lr_;kmd!*}xQ>jz$dsVZ2L}wu? zfQdUPR&2|r3ZlUn#uLAq5vZ7w1}ZLvDUw+7?J#NCx_c2H|3HS+AqYA#^}ZoAWNh)AA1{hTnNJ>CMY zCX-pOAF))T(+o{wq+^_7=nB~%E2^#aW@iXBOfy}`SGYgE*Q;H2hqnN`6i{F!6iJ)L zx#Rg_E+NCwlmt^w`u!Q!di7?QaC9N8K|lEH=45rz@@Fa)XURB7VEu8H@g(NN1W(sa|L_+=F4iKUS$&s?`nd z-0rrrStWd34|5rZ=z4c*45lZXSXqx1)45c&P%h-t*__EEx51dNBuZg)y0e)}(s6yc zVi{WGsQ?%O=JY*(yEXCUIGb?d7T+4QT@R*#778sDPo$Fh{+N$P6+xEc$$YVpugqt% znY3kc#*x={XNxN_K_Wk(jn)hZtSngBWF$pI7!avTFdEbGNK!+Ca>-s@PB)svX&6^j zo4AvuVii=e(~k)_n=U#_3+2V)TqR}if%n>i$K$e)*C`^h~+$Anb)XPQtC`G z4pb2;(TE$)sbzh6PQ;9GVa`ce7}GVWqId=rBsp1|E9XzF%r7sLQ#pfMKqvrE0G&HY zjZpMFtJWGz&=#sC$3h|%C5Q?o>Jge6Gvm4{q%@6q({a0y&yg2TG1)hD3}6fi&_1c?fe&}qmY)jiJ@6cD6I2#G}K zIQsk|9f~76p{FvD9UB#kb|Rm(Vw5oe0ssVn03ZONgsXTjP&GY!Zhl9f|3soL?fe;7L6pH(jb^6XMsw$H3{lN2F#yCnFBW|+Zcz8$Q zkWvLiJvqd_6o0hu9jjLISUT>#dJ`2DA=#!WT0RVpb)bWhb6jDq>_pm+*LR!dL4Bfr`?*0J< zPD80P9_N~^$FiY{OSiocMCgP9mzuXC@*t#{L60N*0@r SY3@q^0000raB8tr{_Dpk+1GwS$XWCM>#dRJ;6-DHzFJ&3&)s3XyV@BaCzx8 zDOFYFtLxXl{MDyz(V9QCczf&C;njDP%>{szM$)c5X?iE=1NVQ>wR^qUxw*6F(G`O# zxK3Zt7$HkaDJ=q;z-Z-+&8)UM;}9r$m38ZJC4!(Rq?ox(8^a?EAxk7;B78!rXXY*A5j74|HcDyJvOf%+hLE?X9m}-`U&Ud}pIB$B<>qC+5S! 
zP8w{f`Saj1B}ial|8}_XdL&$X`U|ZnpM=r1I=r+;%*tu<J&JfrNF67Y4bs~!Oc-$P&spY}&ND_kx z*}PCLGnCKF_N$0Ly*%;2#S>2FXYN|aivpREv`SGKCDq7aV%w3FvazvAB3h#awzhcS{!0bX8*6WB(uBxPs~l*y?e-5sIo!2|HgDab zm{<`YG6E2`W~X~CPS(~D6o=@W#s^3#HuBo`s5)$nHLQRE+J#zOoLO9a*cjA;IQFnh z)gVFwdG~18?R4)hRK2x6INa&;o7Yi|Euw$AOG0num0;dKe8x4 zSRW2sEh_gfzx>L*zxY_IXti>yj3OdZN=AE94M{m7M57qBCX=G*95Mr$(KGD~LRpTh zP(hsp3qr_x9`rwa^SiSeq&XsttII0@uz7nsiPD8t)**<1B$iYj#k$f?i`FOzN=Ol* z0I0;#8sZp|0BJ_6bu`W;*Ld%vzV!Z|x^VjR=!Z|2qIO^YAt^8?mddJ9){rtF0xSSP z(r{g~=_u3$N<@%^nUTO)1tFlOzz80Q6e@0FjddvNQorZ)U8D9XYkEP2&;IToI;;Qo znPw~^~sZNWoogv zs*{Q@yz=@-l>%S==s%vHn4U`9l=Z07nIckv=$nvbc{aT?_t~fKeCtbb_bodyi-0Lb zK(Kk%l%>rI21ro5$&>j9J_qv`lIu(g`B3R^eEr+I2V3vIAd9DFr!&}n=dJ0YGaL^4 zMF&6;;{JH>r7!+o-tH|a1g{kiL#Gj(uH z0v{@cQe^JX$M3!LgJ+-p?(TSa?(DgPy`5}wX)rqKblR#j-}%tzl0Qo0{dDK;wElb= z4wb5`}f@UzKa(x zPWGqfPR&9J3RqG|DG(Xzo^<^)zBM&`?oYG3AJUU6jV~oK0t!hIW)Pj_nQ>X(>PH_@ zk5AN#&Vwq{tcfkKcLr&9yaWG?4}*4&lbe#?5Q%XBU?;mno%0gos20 zfCK;|<`*B=S;2tJ0wN*;BErI)D8|T~Y*uK6h_HRI|E=e~ueDKx0>YdGA@6kZRzBD7 zjSddQ2N7UqL*dRDAmD3P z-WU!BF$6?J06-L$+}U4$^4B_@2}D$yjMgZrE=yq%6pn!aLkuagaN?9=42`b|!8xlz z(8qZ0u9XXC7KGH+ojXOR*OVg!AR+)rDY1wE#l*+@5P>uyOI02jn+XUhilHGy08S|) zNS^23hqCtC8jAuK?)zY#x#yn${@TXI#Qfahtq{Bi5D}Q4pC1l~K14(SkYoA?gu<3o zsOu0}lSY6P1E&NcMr#QH0TB@&dGt|9iGfGM;oBQm#k%G5=ZX1XXM2C^wr~7z{>JBD zd+o}_d*A!T-}!A}1^^L20R&)900KyfSr~*my4UOV zXJ&40ZSU^x&Yze&cV=~ZqW9zzPaGT^+_-u37e4Vg{(pW%5KursgoQycr4(Znk)~-< zN+HJDhbjd8o4@|MJhSaq(d%~e%oRoP_S-+ccJ=Ce@4NT>*|SC)1R(@x?YJz7kXaCp z<+F$YfUqP^BErlmC1#E>g&1ld%Esf1|M_C8$hB4m6$leyyWQ5-2tceoDH34BV-g5} zh$te*Zi)yX3jbt8L`0Y+rWl&wLmYdr>#EcQXq{3FfJCbHl?nlmUor4ca)ST>2!IH3 ue31Xu(BkrZMg#Qy^%@~Embo+V%a0000H?xB+WX6vmcq z>3|g7*2PKlux!baMTwM1-uLUAdmj5f_;pa&(!Hkay)ul{c*aNfFq1gufr(V|o>*V^c;&FAUuRgyNwf&O|h zzPEMvq2u?Ors#wcUD+8VlpwvqW*lGTW#Zc=@JPd*PPBDd_ds5#NJ_eh&!7EQ{_Vr# zb>>a;tFBQ{2ZXwL))_rzmdk{>QK=V>4S=(Yi^cVY!K#xh12LmMNtb6`TOw3x=-3$f zo@I5Gqos0u!#8hZR3B(Eqxbexn zl6GUPF*e(4b|$M$-`Lb}u?T3m7TF!3j%9U#AqYep_xh93*apmd`knW_|Lp@8ntZiv zi{%;x4|cZ+R5OYJf>a2h0a^3fV2x!H+ql*|aqVFe zCv}}TG|GkIjx{9MC%`z!^&*9arb$R}&j2fpr)?v;f{*%slHH!X{1gB>*?J5^M!52s zOY&q%kg*U}sKQ+i5h~lPsN5*pp5D$G$^y%yc4(jn!Kw!Z#lfF1D31AQiwF{9Uzai2Cw&rm*ILLco6kPR?Sum zX$*E~TTc@ufzg{o@U7z`Z8mBgZR~yPvoAkePOsm3{Ahn~x2ZJ)f^0af6cYQsBy}sn z&ibVGhphxbZy5B#AR6r7J$iF zd|~Q39P}#QB3avh_~LqgdNSYM-UlJ^{K0sAQa5Q@HF>RgS`pA6Zv<9(dM+R()X#a* z~i=hGD=kEbHnvS*_wU&PB^jniaFF8L7EWu3i%cC~>M* z6B0VivR$|Bn)BOhsZqK}7y0sg-~8a|{=HZ8)KQ}6MM)`~u2r+S?~X>3rYn}yH>b1Y z`Z30^ZBvOWp~^I~0VZ9Y*OkDWP%2v`J*+Iz#HXff&S88_E5tdbLzFe&L2WYj5+pdw2L2DgK zTBhl38n;|x!?hGjS!G)rf~M*9Jic*2?LdxgZ@lxMh+EemtkO)XmRMLph!mX8Z)S1w z^400<7hf)>XI-%*%3z>?QH^Zp$Z!MI;L=JG~pT&H=aN#t2n z*q*8jjEo=naePZ{c5i1FdxO^YviSKRjCKwV!r_n*SV)9b2MEK)?mGkwRBN<}<0Y38 zV~7Qp*4l`$>yxkTot=xztJC7L1K?mIJlNgnw3vlF4{#}P-o3z+oy}hIRcEQp>s*M2CkDh1H9+ z2rQq}5Cwe__BWH|ylYpMXR5Ncpk51l21-iR+Xwsih68W?L4Rw<7{H~jwG}5t)~f59 z<@6?QlT#@)Wp3F@iW#AJvUgV#YAI#eKEaH-SQ)8o*EhnF5);BKiY&{Qx*Ya4HUp0X z^}nB<#2-K3*+1Idy(2}MrMYOlylQH$^QJhPUEUe(GyRL(6@zvH9xj zc782Qu|z;CNNgZ2!l*+LfSux@;Y7)%>wtv}GBB`#R0t{V+vNQ}eeZ+szQ3`v1CVw1 zY5li9`DxQB==HvOakjEq&v(Np;B{G64YItnNQyEi7(6-rMyo+x$HGX+>D;Oa)IiF% z(*Xh)gAPd3i+Vr%@y|c};61}!%W(wn9M2=jELLe*H{)SXD~XJeU02nu1e97{(==68 zLanlz+dX@gEh9VdnYCEPgks1rErjjcz_N&IIsfqYKmOrg|9Pu)Qs^eHZGyD!@`f*y zoQp;X4uO_ja0NIQj8dRAK)5sdc6O8H|M9_}{pI-1lRF+myariII)fXbEoKwkcrI?z z?$^J01~Axp@C`p))2fq7@}^9)WEH1cD})dQl30iUh^dV!6P#lMA;8RL^ZDY7FHU~+ zH~SJ$%VBFSB3j$LmYdrVWx#RG*~!hn{N~pmfBc_6`^SH}e{>l1yr>^_P1T92NS8{f zz;$|lk2!u*G__Dd8)|u4X{`-{2vPFUN6)$}{{GYNjK_Tw4vB$)S~i#EU=&eg?jIeD 
zM*+<8#bJ0C)T=yBFxIt#qA8ppdU1ZeS|tdOD$5jaj4}`)tu>I+VGhMqs!nU^ id0rH_qA8d0>i+@MR^s8ppEeW#0000WwEgz` z)tm#b?QBAX34j1X10VueE6FtnT1o*hq`hh|3Q1gwxnjl&l)*v>EhR8o0c14r81o#4 zppi;J1eMlOaH6Ga(}VE|VYUkmK)@IX5Jbip0}L?+AjB1yN@h9D>h;?n{jNRQdHd_T zpMT!Zl2jWbBsW^w6mvz5pfMgpx7fKr&U z$)|$FINsvun{ZzS!47mK2q|D*ceq0+ zqX=LC5JV6G0|AAMYEdF2D^-FJ81*`P{c6K9>3S}fb6{kz5!}1E`RL)@R)08OX73J9 zPS4I4t5`#U7}XeJ2o)vVp#JtY;bm!rPzp#0paBpQKcjX3VW9Cxq%-Vprhr=!MGCmHbLXdiAW+$i0?Xu1PYN{N-CTfvxYZ1|g~>m$I;I+xI;U;Bp-YUO4Qx?|-m6>UZ)awMD*K zFYE1AT$tHBbpo$D=xq*0wb0Jv)y3syoU~W-!?MUVFol$st>EoDch~EAk#oy-o2`yE z7(j?1avj#|)^>NU?QRWwjnJVmPxGtUBF}SRxr+iXma)TN;4s^E0RWzBHJWuw$l>Af z>w}YD4#(q@NtvzS=JtqEdvo{h{f7@N%BB}Hsa4=P{dRS8Q18?oj{up)iWi77WuOo; zm*8}k9iPwgG=q{Oin_hw-rlX9tx>Duh-@)iMhEB1XZyd-W(!!Yd&8}Vo&Eq|01fE3 zJNF;l8+2=R&k7x4Va~9UWiHEHYps+v0AgYn8qcCMPcueH*lIPqL(ldc20UtbyvUQt zZuTM$&&D%?a8Ts%@Mw`H7aFJo_dsb``|689zim;;OoWjzfJx4IUdXb*82WY6ZMC<~Gz8)aiQp>c#U#oB#q{2U!*} zzz89QD1nj$K+pApMw?a|hFH)*jEI&}rZLZREd`sd5FCDltL`Vu&!Z93Ux1Km$Vv zgODh#ltyXB3CLDoeEH?c@ny1%ol0eMbNIpSdx7ugLI3~>Bmnt}09iyB0cN=hSzH-R z5wZiqTtaQFbRMrvmRL|AXcV?J;uDiZ?LH#n_gZ?F5`voSS0XSwH5}hYf&m?QI>_00z>H5T8-hB7X)HHG0U?+ zXM`wO3L}i((HE7ypvY7C* zWIdZqF0K~KBIg!i!+z({x{ad-{_ZOIo!E*``*X8$YzIcTxNA@zD030tTbx%rf1VEavKB7VGv+x z8G?z?NMS!(q>Je?wAk+T5s|mKrJuL0M$Wc>U84f3H?6cOE?MZEWm* z{K#YrYH|vsk;)VaB?6QSi7ku~A`WRbq(J2=PtvI$x~)z#lZr?s2_>N$7$p)Vio8%# zS%kP2^KhMT@#2T?Fc7yNe^wiAfU>Q53{+{f&>{sY;}`&HaYPYuHMGj2C?Hpit@Gk) zwZ2@W1RB~1Dh8ruvQD$ov7OKU@JYAVZ*=;38n3S=aU8w;@!x3_kG6Ljt)?INwp#(f zC>dzQO9%~t2q+CPFZgn~I6gXl^YZoUSMQF_F7u)wggUnG&1P3Qm!00VZ@&5Z!KY8g z<5Q)vlZDU#C<{jL#qs--gF{3JWwh34jD{mC@FBy{XpAx9c@}3U$M5%_{rBbbSM%vC z&2z_f3MEL!^XXz1#f9&NpMUlFlP~{NZ?)oNwVW*+$4)JG9mQ*rD5=XV<3%~0Pc_0j zdv_pn7gv{s5E=A&5|vl)cphucrI1p~ZokEgBA|>MygQ1Qk&vd>4{z*@d=FX34BFk* zWa?C`y}S3<)7f%`n4BG0*i;OulDlBDvnpnxy5XwI0FDekQODfVY}Ot zTsB-n;zgo`@EB2PG(SG!TO)&M)noOlFCeUUI$l^Ql}Ph6ju-Dz#oy^w< z7w15Vdgyt+2c^u?oC8&54Wl)X`rh3;J2!3;Mo=RRZ|`ll8kKx{di;8y-V)67N8Q?Q z$CuN|nd63GwPsr^&(qd?z6tT3HwNbU*w)`JvVR=`LT#=fMU?qWN#R)b*2cAKTN}4O zd{Ak&Nf6jKw))q1H|iBnWXZ{!{psoZ#$Xgy+P1;j>_Ua2(Z7ZnJ3V|?&5ob2_nX#x zt>FV3lLWmOlM*|WT2cwY%T}|wcmMvQ-#=-$JLAjw_uoAwTbtLt&_ckJQC#MklKSd$ z_U>@pY_+?CYuNU(EG~KB)mmOyWpaD9I*PLiRO!XKd~-(EsnN8kTJB&t91VuMclK^R z_$11Tr{913*;BL*=@+bDG|uo}J*`Wk9loFhaZ_@i+KuJXylxfd?KfkO?bf3oLKX%)I)O+I?)~hy^Wz#jvsj4bN2uk%KM$Y&oD+FQ6 zjuwUx$F6Ia>#erIN6@N4>n@R`$_hbpHQ7$EXy75*o8yumA~y z{a~YqoJkCbH*nC_LzrZ_Q8@Gi3{snJ-L@crDyz~2?bVZn=gnrf=~g7+qR6@NR&Wk{ zFv=J^O39Oy3y<2~TH_MNaU{WE=g2rB6oJUnDmh@1RifXhrWtm3iwEP!Nu3aCIK>17 zLSjUQY}XqmB%&C_IB4U&7lLC9mD+{qfp-*B0I7+fE)SlZf9nUgn9Yt(PqRr44XIPR zr`x{Y(U$E%yXzMisVJaDLkJ!cKo;vhk+#Ssf-yL2oX=SX5$aSQK^)W&oaL0AoL>Fn z`(IVX#dfu_fEI^)(`mK){OC--xw+|ErS3XIz~tcge)ZZUHq9l(*my_MGGQ2E0=?A{ zTAtE~oeMrKMc0{LqiKEo^y<}<%NJ7A!Hi`lUp~7cD6a1xR-Zp@*NauNUaZm?^={Pb z$K~Vf{_*Z{*9?PE*)(sRdsOYF+l5<8QpZXL0EsbDO38M+;GzhC=^+rxy>U&`VB9~> z+h#ug;+t0|hgZPK=I%4>*AKHd*RS7x`uy#Q6at>gcM&A!=;7N!fkUTB0EH00)vYntWZ`N8Sc%j?JW%%J562GyJ%PLIw`BS3<2Jv!%N-|GR4 zK-kH&zz~x(5e}1~ZJYZQGVU_}YHD40Sh}0p^}qjjx3k1{YTcvI672hSS3NoZ?%QW0 zfctf&O|RPa?)J`i`rGF(e1HMsaqa|4@{AX06+MK%{>Q)Hb}(qlEUqG|xSZB|&?APK z8eJdYe6^U}+yFHcJjwU=4Q2VsS+aK+FkdzO>p#72R%=Q5i*LU15pJ5U>syc3`BX`X zy~SkcZ7_^5R2`;;37e2qJsi)Jr z_CEgchqnxERu9`h|C_&>939@d-QwZVI=i>OZ@h0hMKC7_2+j$J7={Q85lk?a1q6&z z?Y6-HXJx`F2_ewX14|MWkvsf)5 z!s}+6$|6tGqD%p{jT*EW2}Uji4FC%cZdTel0}+>!#pnq}-ukZHSu>m;SBI18i}TA5 zSiN6=_Z<7 z)>$f(WPm;x#duwod(xNtCudu?N*?Z?J%4(3dLg-de>)4oOCjBsBG z&33nI2_T|a>R zxdjtFjV;$7lrSWm!v!2|%zL+J00Xm@-R4 z(8JhDgl?rd1> zR(XO*wO@ByZ`IDnfEYsv$)ctJ`VmDElp9yu@#)#usUvURz5D6in~N{L$dmMO{@5uM 
zOwzR~04URf3hBIeqYF4>#U!-p$B!G1HG+guCW^e`Oq#*K01-d{V(WtsXw(rHJW2P) z&Mg+3l?_l(s?RNj@@jvBtYOp z6qHzFgBx-to*bV!(-8yVXxz~C7{IiuDCEwCUaO{SF=n}}PxsDa8yHGoz53<-=HuP$ zrYLgClMgqucb`65?^7uOMx!$b;s^i;0Re!0C=)q3uL;8}61yD~C&D{Fj#`hSw#Jz; zV92w)u1@?4oPYWB(@$?df4&WzjcQo*GrHd1FXuk^5Cg^-0vKb&80Kki9at`Uhg_0L zg4M`^1xAlLj!B9uLajSEI1bLXeY`k(TI^r6w%snaKm5<{W~<%WXdpzAreL$x##ECD z!-PNtFpeMq5Pb*`lZZ$_R2xS?j3{Cd(J&~CDPx&8FnTPRq)Z&0oXeDlQvdE>fBVD# z{>NQ21R-da2`U9QSv$`s(y4%gKfBgNs*&oZZtV_@su?=>;?l|BwkuUNjux@A}98ytvvwzB)YGsN2UCr-fRN4<8<) zv%m$+V03{|Mi8QuaY}dHF!V}k&1fJ#1Z&Kwdzm4O#?9syMsPGenC@kg(b@a=KfV3a z-a&4Fi4h#;1rNoqyU?CJ=#E+L@YqB0q&o@y!`a@CPoh;5R7q-2m&60 zF*pYT2(gfv%qR&EMG)1XJhTx&qd_JTYrwkimW!?A6u>xNE(bFtSxE>6AVvVXes^*4 yg!tm-cEh*;Aux;+DaN5ov&;i+>=0dW#{EB&IO^vXrMg7`0000rN`m8RwiMV?gA|69MBAkpVCu1OUbWki7STh~5){gx~-W0j#r#2;QUjTDM)- zEbB$nER!S#pw(R=1Ogc6qcjVnBnndi0l)!(2#82r9sG)hJayJ}UDwvjW!tvaIf)o> z4&WIF?^=Y`dy7cUY9bFL0l+w4UtDVAt8!RXYf8pI5)c3YjWLKEfB>vEb*JmDTOe8_ ztGm`2r-V{6002ZF@SHQjnR7<#)>><=ao!;yqR`qmn$mnQ9F2o8LEuCn;L3b--p=Ni zZ99)b&cHDAv62pywzh=^0E{-?JH|K?n5OI6Mr-YyUG+?NW?8pURtb?>YeRE`E#sVdrfchrbIwnv^TUIq#bRNdO|m?z z=Gw{L@K#C)fB+;``9VOXl#ue}x34bGj@Cx~Jk9c`WDs)Zl9aU8h^QD>Oe~kn<*aV% zZtv&Ay}hHwyvd7-kiB^E?~}`mosT|>v#O~2f-@ixLxC z4BegaW|{TMI2iPY8if!dO;hLG%Y%d2ArdS`s;7L1SZkp=f2Y?p%pG7f+MAP|u;Ow;7kXP@2My7!O2e)-i`UqAcg z(@-)M1tDiuo)lHilPE0nX1;j6`)cp-Fzc0rT!~DEZgP4uC1>v3y_J>0AD?~v;K4RI z%kV0uY*mPeh@;`?`Sa(D=*{a_$NN7DA*#`>Vtov8+&Vv-%$W*%w>S5Xj@`0;x^-_e zOSX&n^ZVn^?v3x2#m=40PaZu|RzufvPe23!SA)hF0{{=O^Wo0#pYH6v`3^15^UR?7 z@?T#snt7S!MV1eT!`AA-=I_oYmtXwl?|wSrz#W(4ylS_`jWgS~*Gw}%KRejCy-mPx z2=KbHJb&`|VJ>|jV0U--i@*JIcmL1>W-sn?R{JjvqdJxV?S9oiCfyCWvFtq;tdx6lVxTzf_=9$YmJD zX;GPF)1IDR@*s?IU$^ZK`}=UPFN9!Rww-GyOTlOHU}JCxG0JXl-OchSjFQE&MJ9tF zBVYh@LqJ4~@!{I~_|DeFbk=AarA2SJZk?XbMPq&2HqQH)aVdG$?``&aK-73Tnaw3* zsglFBQQ03D2feDFWPKt;g!e2WqBzMOJbp5nFK5fPsLbXTotz%G-icN_ZHzMuZJ6<4 zoCcCrRb`AhI@ntujgtP_`gkLV;;bn9gAL>gya({?T?L3pz~ZEM^!UT^?aeSr(mWGN z0x;y9ODUv689kH8*bEShZ@-Q5&ee{Qq zr?cr|Ipdt`PLm@glv3FzP?@-RhkrmiaAqZ5PJu0XN-5&yUBENba-@rdZvWzRn>Sr-q_d> z;=MI|GiYz|$pZpODW5!kvRF(nE+*6ILTlrk_nx#iTq;Ap)jEnp@ZM{ahEbMg!{I;OaxJqk#3n1fD9QIQZmkyG)h05A|DF7qhQ;v{EGUCDo^eoeuRV`z+S znudVFD2fx;S!Zek;2E+oP-z--&daJQ$}$K70KCr54bCqK-nIn1r?zR&&d#i{44KxQ z_a2Ay1YT#FY#n3IZWHmk2QzRh$<|flOE;L;^q} ze|5x_g4MEodwhm`6;l8(#?(#iym!uNO9aR`SAhybnWsrARG?H@lzEv(|UF%)zJM81^Jx9lOoQZ?&q&T5zb5Ru}NQ5Au5-A`AqH0wN{Q+F`qBs2= zz3f$gMQnue45>5|u-#x=4w0rjf^%#(7(GIU+yusy5>O$LMX*d7B~jJWKtk$5#6;Qi*0${Q1#M|( za`VtmPnw-qX#16tlvvLE`NXeRAw}*-1{8s@92L9>_98%L78!&{J^=#|0%Hg2($6k~&O9Yj5R|k_g%IKLPF)}hp zL{^X-H>;325Rn*GSPd0tMu7-ImB@ktq3)q=GAE1{1fd>EA$2eb6-t!4mMDd;q}$l` zBIU>+k}4HQAq#|rkfJ2n*%@r_Zc}#5oTVZJj*hbc6nSw@m{3)MdLRgOM+$f)2-IDY5;-EG z&>iVF!N-(sCf0W8hL^f1F>@qQ>Z)+9RYlS5*STN#7)i>aQX~s0wa^32ZS>28LUI7c z%mSiM1r(u)kikU{5`-3IU6_?K3oC^3>qZDFD$Ox*#vItSmI$OKT@r^(x*$;qSyDYN zV-;A-yH;*>(*oWO6qKq)c?PM{@#K&3RJGwj;{D9a&8LWdAcAOI9H zcuox7=k8c%>xqbTrvSlJX0|EBq$&Z9Q_4spC&rW+5FyKqTFY`vX{fx7z6}sDJHQD2 z?OFu17e*om%Yid#XjY7};!m)u-W=Uh*j{!nN_$fB3uK{LK%4QUJN8%^66Fl(MQ+(d|O1 zPflOlyZ?tz?%X*#{>t`DV9{yvpqc6yZ-7MZyer?-ru-(vss%@?|h=A=#HwQlvHVg1cHP>L=x7< z%Ujoe`Qu-W_2Ajj!})CLtSJkfk_;)P5Lc^(@vc+WRZ$viHf^(5Ee4}ue`iNm)r~i9 zeE8iTK7RCIIh$(zIj`KZWk^#zSNqe z6sjYw3Gdu`=jR{)LZO;3*OT+vWHFo1&X?0k@&=JfQn&1}l$=l@Wf0bR+|`|aRaUCp z9w6}QVj8Wzee2CTpZsTxz8F(gYobyi;qGYskH7rwrQyM&um1S->xa*u9Jg-mLSx<9 zM{7g$DfY_l_F$`)U73|A3x@pS?8WZxu6r4Gx;1myTud*V+Z*k?bNjt}M}Mrhs-Er< zX7JPSxBv93zxw8TcR%_4lP8Z(pB~%w%455P)x8IgkB_IiTe$4ANjI-wd-KYH@O8}U z(!u^_y-q1#+S^~8O>`mF^YwIoe%Rgm^S8e(U`SGW7pYm=AARuCAAI}AUwnG^@zKM@ 
z#iVI2)_M6no-Yg2bdwLD5+jl^?)hZWa(?I5x32DAML_TU>HK18oj4JLL2o|0Xc}|& z-(R?d!TKbyZ7sh4{&)ZNpZ~?p7xzB@d~$YflRw*>-aYy>l(w>3bi%trR_a_b>rxDy9B>o>DZ~&{ z)0&(!3Zb+{L>~ja{_5f5M@O^ee7@O0t!1T0V}1CIo45b+J!iEsFaGrO@yp|-miopw zUy}-TE{q9gvzSg7t@Ymd5R-Eu18j{4+Xusw;Tx|u>#i~WpjYkf?(OaG005DEIX(T(U#sWGC(CtnaynVAH&Wnu+<*Ho zuGhoze?L2V@p$55^e&Pj{EvVCw|3Je@1)kV$;-vX?8(!ohu2^0_Nu{fRFrjH5Bj~K z(v&lkl)6wpBm}nBp_FZ7+Sc{^o$+Wm7}YMg`P@Rn^=jF)mO0axcR!VakaGg=R%L3I z*_rc~=iZH13j=_f3PjETWm!3AQ%ZGJNg)t{NJ^4LQKXhO6ACeg5K+j)DL5Zvw9fxO X;9cQ@iMrS+00000NkvXXu0mjf4w$>+ literal 0 HcmV?d00001 diff --git a/datasets/example_images/peke_s_000545.png b/datasets/example_images/peke_s_000545.png new file mode 100644 index 0000000000000000000000000000000000000000..fd75a564daca1f167bef891b5fb6fff7bdc37111 GIT binary patch literal 2498 zcmV;z2|f0SP)OrJ&69Tb<Ljh2P#A z-`T4o4+CIH)NhJ6zBqq&5We;9{@ueuAR^#YQ+)HKv(I0cMnn+!)-UgE?3G`CVR61I zEC@yrKnSj=6ncR+oagy;D~zYwL&v}X4gefm0uNzrZ}N*< zN3VT;>D8CwDJwj^{3D`O9v~?C(AQD@R5c;weN&uEf zNQeLo0L<(h7{aL2uja4y|KRn%`RtX-lv(PoxElph8N=voAkhlYO1GLq0x}W&DN2GB-CrH`{j%C#gHC9)!L9z3t6=D>pBmn}6=b7tUY2 z6gGaptlzFDkD7tBs`fo%A|JsTqpDi^p74AD2%aFeZt?v2zx})Ke)W~Fs>$*4M=Qsw zz1Uj{TEXeb;fEi6yzyYvo?gNr+B-Vk+1j}Io10fIUwY=5&&IQhsVPV4QGnlvZmgNJwF{9?Z;q?;qZL{q;Y3xP8Ykj0#F?_tSrU=i)+te6oAv=31s)C*0~c zJt-w2ohH@YyK9s5(bDYP)r*&^16!m=ZEE>aP^o>N9Fu%+Eh!CJr5DdV`Hye@_#1!z z#_sBS|MQ>!v2gkN>Ye4))q9t&JvTTrwR3c;!&uwI0FMhma&g-e(lO&`n2t|Ik?&nt zS}L3~h=3xLK_Jjrpw$z*pW4Nz&i~-eAOFQ){iP&l@}bHeZLZ%di~Q@a|LH&d==&F+ zzM6}Es!VOH)|}QnEF1#}#H`W}PwUfheQ#r%JA;K6U!m?902zdS*zl1A5ptu|dhQD^ zG+WWbgL`!~j+ZVx`|Ptk-kzUd_|nU-SlPRAbM3dwcgAS~h@nqQ9eR2`l;A9)LYkDy zYBL;;?r&~(m!6!tdY%1LeQ(5jpFUPd2u<4?|B#fkfy47!a*BYyY>lva)>k+-nrgPA^?L_vBIF zZKg?bGAsmJVk?;YEidrMT8j(<1skobl8ocA8fI9UaUhAU)#@blNJ`%WMrOy%40v#O z^wI6xUwQeZ-prZVCoV1SKgiNin%7Y`2-;lN3YaBYW>Xfb$~9X{9tr_MK&^OETWirO z;{cf1)JA%Q00|Fn zXRR-2=l<5|$zh`rMBSO0vzMF$-#=YCmFcM$kk(d4p7f^}P-|CI&evq13|v{) zH4%oP=LwVqaystE{f?QNWd>`kwN@J|eZSr52_foA3BSiugzbpiR{9UzDZfX&m_ zfA7q>3*Gs-ust(5T+NWx(Xlc*Z1tj6HE@1m4qTENZLC9-l9`#&vXg5c{;DeTEYIs& zsk$<{25W(xb&N=%Prgshb}tCSPPdI*OSvEgNdZ+=VXiDT3S}#;Jl_)%wNXkbL_v~( z02t)mcWQdJE=S}0x3Y2Kb!OYWr<(I_ zxO#hSePwO)eyNQYhRU!5QE7*W9smnQW_kJA^_P?G@8C2cz)pd zApkM}09sS=(u){uZLhrZ>1i??XS+N19-a)#JS&RIvdD`nNvm7|uoc<_k~~CA#t(_e zF%StMJSha?VsFJKml9a8P~42^aS%pfBO(IE#|`;7BdxQRfr$tZnbGMQP;?fqLT}|b-`sz2xPN$3 z8`o$wti?&;Bmy(k#vuZMLqPoD-+yZ=9`xh5KNw8Kakn40THPpWN3C|F5eXrf0f~qR z5fQ+#v({!wvbVeS$(`FD-@3WGvvst;pC^e{wPQv=XB`7u%hvM$1NZpP@AteN+yDRo M07*qoM6N<$f@)30?*IS* literal 0 HcmV?d00001 diff --git a/datasets/example_images/pekinese_s_000046.png b/datasets/example_images/pekinese_s_000046.png new file mode 100644 index 0000000000000000000000000000000000000000..ecaf5356561c10828d0b36cb31b97932bebc7886 GIT binary patch literal 2388 zcmV-a39I&rP)-VT2r;pFFUUGYVx4zk|&Q`nKPAc`}{5(l{b=WS(v*~oYZ=1{4uXk5BjI;G| zH=fTv{Pd%X-!2CIeip|JoUCqHn^8w5+O!bw~t}oEvLyltEVR?tHsQKW{hS$IvlJ2{C4^B zmy45WuQxt>{pNDFFFoOdBHul1vpDu4cw>wCv{RDJm&>fkw(EOuY)tTCn0@$s^{0=X zz4LTcq;WnU=2?;iPn>klJFpm<@hW?K_Q}^@y{J3wQFyi(-`s64u5KdC{nE)=?#nWX z<19({Rf)g-$6vdu(ru%f=I!&%w}JI`RRL)rc|lO z8Xf{+&Q}J!_W`g6;)w$YBw*`kKnx>B8DkW{1?z_2lf!&p&>2GImlSqXBs;1jaactCbAkI6>Z7a9SQEA|x&Ph)@xwghv)^2;c#G zXJQ(E@b0rD761J1yMvTgDz+R?{_^K9UcCEc*cYa0LK94<01U;6H{N3aYh>4w;28;^ z)%x}p0-i2cWvdOK>GEVSDq=x{vq)J=yukG7ih@qKZ|pQP4KM=*at!5 zH2dOCTaCCT8n-EAIfGG@r>0G#ojwNSVI zc3oErPJJR9Pp!h%>Pn?)&J!Af6%2p+(Tk!tVlhQd>*}~e!9|>`j|b~~)!Isxab6q( z`SJR2`Retv#pJywr-bldH(-=JnHPd*u4%vj;n!EMu3kJp9c98OIT*vqWXX8sox?#t zUoDu`x~<9(U_2ShvdoK&Q1$2=cu|o0dyZye_Z;n3bc72d>B#Pl= 
zU+Z5l-XK*!nh&P4k#x4Iy3s777-Jm3Th=s<)+$dEA-JXtGpHOo*s!8{=GOG>gO+fB4Ku`Eb9EMat8`-FCa#Hz*#{e9}mBv?dzN3+FrvrfX)y z-sxm2IEtc}Fpi8nY>(p^Bb-{(u@FL@=NNfobil~^Ksg(Y2OtHqG6Z+GtG!o~>7?7# z-UeH3JYiC6L`0lNC$mYx!*VuWo}O&0y6ZHjqN}A4Rd1Mk1dK3>DKSc6gcxOr5HPx_ zYo{cH5G6cH>2T67eFve!&YdkMR3tZdcWnpGhrwu^5mUE~_bw7#w;CLBT9^9+Wr1+Q z5M!N^&~=~{LNrNIL{ad*RkqT|hB!*7uJ-%7)Lx6U&w3+9F!q)NbH6^k`Yk$Nj3L3t zvWnsu8C{k~5s9*^2@P=`GjB{+*AxI0IIswXIEpxDgc51&&8B>Fdw;#&JRBl3~D-wNuU@! zC4|tjs;=(0FJJ%OG<858AqZfquE7XXl)_cn0MGfvScM@E^==711Sk^*CQI-jnV6XuPoJM1XJ#00#pt#Q=2mT9eb zdv&$jB4Z~>%yD9kJsb{s#?zRlX~vVMqtS4CRLW9obwsU51)fks*kCY91ep#qBO8po zY+9!sLYVN#lc+Rc!7;70@e;flEiVG zWm$Zvx>i<{(zfjYd%**XMU)RGcdG7ZqFZFu^6`vkjc0&80LghOxvba z$}lE47fNY_k%+}$G+=_)bqx-Bc@ZZu#)wer1JVWpV5_ax)`#E#0z#2sjA4S2v&MU1 zNkWg(MX?Azv{E^T93^o8WfT{JUzamsjZbPV5@YiwV^=hC{tcR*8z$NMvQWdu=mayV~puq1t=u8$clneVJ$K` zgjRM{ci2}1(Y8^@``|4?;1T#>1M&<&16Zw99u>u4sG(z&U>pP`$YX0AAdfMol%hbC zQu<)Q`{7{1S!`6P%C6p(T573m(?}1FfU(XniZKpp7XKfH<0~G9!ZGRq00002r{r?BpF18 zY)KX+Q#4x@scK$(-|bJgch-Vm|K`7#*?aE-AD^CSt*ffqY_>#vad9!3Os;RPWduo- z%*K;g>d`1hEpIowx*ep{-ugI^aiYh0>Ycw@tvhR^lG1av-)&YmYPZ`DL%%QgRnxCF zyQ0X4VW{h7K3^DPy!WHgsO{=JOO=GaYXEF99$j5s{qXK;F~6&n^1;kzBhwd5Y>YA1 zgr3GpdVYR>c5+WWefmw?ws9OU@7~i|0SKkk(b3Uju_U6pu1BNMd@;@9NO)K8HvJG7 zVYyh$mq#F^_dZED08i^Xg zk-#)BilRu8gjr}EX|0LKS=Uxg1T=JwLyoe-d)}AjdRrFxcsiY;U?JH1prj%uV@#T+ zeczXdL%H3k$B!Q?rLrtT9T9^u1^~2jrPF8WmF= z>E&_>f|SDPbgYsX5Xc9u08xU-tfv6zM9h{|EP)-cAG+2VWFe%KX_`9ctqb1!AXtDT zX6ph4Qp@GC?>lC`dw0Qtqy$8P2qAbG=}1XtZ+H->En#2~&c_;Wn|;>}XZP<*B>?~c z5W#x}Knk?m?~z!65G3&I{>gkXO%k1ENtUN50VN=CtW|KPK9u|Is;O=RH8S!++9=iv z#hcg9cbiQd#Q@;Fcg_Q#j&v01K!k`SQ0j;QRg&V}dov&VvE~aLzi%QUXhv#If^ijT@|~_NCfvuf6kHD?v_2 z34>6PR7z@rN@9{|gp_>o-Lq%y&>o-MQv`ti;?ViaHEq9iZ0R*1%j zkfvFdoG8I)jMFO4(qvSOqO2f^A_OER<^b4q-TSw1|NHa*`S_EM1v}^VB6KH@mz-n{($KMz+g&;Q|9(a2a2)NvXkYUgdaHEr9v;M(l? 
zFRxy{6!pa?AH6AGI9fganVyYhoJMy}&&?2SZ^~6WFnX=Cceg8*q-o!E&N+}WPNUxR zOpXILLsxf=+xgjiHZ7!}@ap+rlPptOTPX7+55}44?Z|!@~iFY>383L`0+>o@~{6K$B}E> zpFF;p-kCr7_&y6NyS{V2j8x<3-KLyowBJF{I5lk-Me>GG}|~)%cB{Yel!|?dhe&p zqa&2EZF^_xh{ZULuhu&TO(EU4^oOs%S*`2!q5byF{~gL!5l1Y?qcIEXhU&-Lch#=> z<>ANHkN zZ`$|oRxe(>+jq_cznEsTJken=^y5`5rAFk`>4JbVfW*CO1ZmM=@+aUx2D1!t6uHSCWZQboxL%n6+XIjit zjAXpHe-h`Jy1se8UTrTgZzhxF7k_wO9hzaV)>2)yefO+wdgFWu?7a^m2mm2DisWRR z4MS6vYw~qBlo&b<6oDvW7;8CG7zsXoa6G*;QQtp*W_r;zLFqM-@Q%G_!BPn6oTET2 z2+>mT!8^f1XK|hvrg>ZLu5EWv!XaCYUZPL}RghXp?QxRkj~;$7zcW*9+nS!uz{0dX zc)^GmAu1;JWCJ-OgQB!{APSL=^8AFU*{ojJq122(0N_LLK@b5VNG&5N;KBL%hmRi_ zt<~xA2STV}a6*JbRd$^v!ZeNpd1I{gp{Wmo>o{U!x_f7qjpb%_<4viBL5>6~5+Fdb z0Aj%`Jo(axsk6PMNPv3sNd|(K)>6syS`fACaxZ}Z4aOm#dyBUu&Lj|qB*Ym6LP`cCG$4UAH~azq05|*$1h+_x5X;2~ z#GskUh%lNZdz`Uj$9A`St*&xacU9N&t?%-@T$r3caHMnaum55*6(+Cp&_~Ak$sy)t z6h}e87R8*Fh2oMoec5TXPKOuE(k{V)=X~E)6$4Yg@>0iR;aUIEqT0T`V$qH-7i0ll z!XP<4J{Kb4q^qbN8cT~@YYZINP`G;NIjt2F0$sb4Ab6WV&3 zRl2anMVY6jR2awpj48#)B_syDniYN*OwP8VgRws&_B(ep%>6@IwZA7oDt`!eESM6XL1 z^P}nfl;t*^t4xygo#if$!rE%Bq_i?sSQD+LoGpYU61`1mwfx@d+Le{t|MU00yU}Rb%AnxFH*pORX_=a`B;mV*gGUkr7SPKp^;g$Ax5s1k z^AGR;(|`W#Bqq*M#`qd*2VjAdZuh!_nZ`?C#fl2IafY#_P>w+KJBk zhcJKP;L*w8>(9o9bg%Z_J8%EXzy0u0`jq+eeU%NLJzc0l+_a1nA|VbN9)oKUrCL;F{cK$M4^{^ZMq-cAo3^zZ@MrIeYo)>L0z{!6F^@ zk7o0ubfKzjg20P>nFRigjg^bspXf$9omUkNj^}xSyVtjb*4p=d&e=+*(_87T_f~@- z=&f%4?*||J^uPb>=JwV~d#%%qcQ(A=e>J&U2b<$~IM3$uX1&>N*5>nNRV;4pto_wr zUh6OUSEJ+?Klz_@Hi=}2h3op;0sxk2>PhKw5K^vo+Gm5|(}#!mKD+-n_x^TsV`FFY zh1DA`UD;V%j%H8o)g*39=?55Xt#5qmmDdgqjwctB)062x{@}hy>Z4LEF2_I)!*G3L z^PS)MQ&E4wTaKxmhk#V+>g;5T%r5SrU=wiH$2;*KfS|$DGg8bTAm4oSq&X9;JEu z)q{KE@pv|$mU)gK05}3V$BA^U&ad4{HeN`CF$NKdxG~0BV~k;p`CK3X=Ntf>!=fm& ztRP1+5Ieh@H*Z~Y4u*sAg9i_W!?Ri~7LpM$vRqZxI(6g4wYToP7`G|~=lBnQ|91eu z7(*oIoVAuQ<}r?lLI?nG4up`t?;ER2U6Qi^5JhnuC%*4%rN-yylj$T+^E@vu=a(p} zYd7148ZFXsWm3TyGsakJ8FK*ee2F%OAu6R9V+dSpYb|lXg$NMQIaifB%bp=a!9}at zTwCc`S2!p}7bj=q$H)CowJrg`3#71>V#v4#%eaZP=XPwEfgCc9p0Z@o)Nbm3gLK>E8)m74rDA!&ba^p0I+0<2ocE{ zA~IyHwO|bZIcJSghycK>mH-4ilQo>$!T!PLrpj0xIKdn+Kvq?yAR@*XGG>fr&V4i1 z5z+rUh@NL5vYsaq!5D)8()kiR?x*C9PUroT@t!rgZ>0}@S!ymBp>qyItyatP+-gk> zks%QQ0Dgn#dCwY%rT`??wpX)kqgO+ z+qZ9@o}P}+$C|7$=D9#hiHK54#uy?#R|bJkWXX~52klM=MYccu{NeE@)7h+^1L;&Zsl^kT4 zS7z8BKm7dB&vL5_i7c-;xZE0RDldf0>4-D!Sjwz8JUpCCCcC>kw_m!o(rxwo{X8#> zMN8zq0TIE0ArlC%Q5Pr6&-RbcpDj-`RgvGZo;6yNvzD_GofD{qMN@cIN-qBo>OBzC T)u4*e00000NkvXXu0mjfn~eIF literal 0 HcmV?d00001 diff --git a/datasets/example_images/puppy_s_001045.png b/datasets/example_images/puppy_s_001045.png new file mode 100644 index 0000000000000000000000000000000000000000..3d5c52f731e9793a58ad0b3b4f5b97aa9b029501 GIT binary patch literal 2282 zcmV(tYBx!vVIt3b)6?J8C~g}BeT$WA*9gfm)iMJ!OQ^a9q7PQ zvE@W!hLkcQNQ^-Uhr{7)cAi#qU)N5-F{Om7`OJH&Du=vs%;LmsT}U0WK@J6T;Vf5V z7=qb{&?_)8n*k9ra^=N|D+lA06M_Pu?C$OhgCLf5={%K$K;92F_V%xT z_2l!fzxu*rySSXy1Gm`>HzymF<6_W^M&q0zr3|J3kaK2c_Ffqy_g|~Gx68w)7f)X<5>#CmU!Gt1 zm&9X0nQL-{2BIc}`Q`X*cL{J1(Km(JuU3t$=2sJw{QZ@~7r`g)tdnM!r zyAk`95ueVx&yMDom#uSMP}$qr3H|cf>G6%7rX;i+)pVUw0Z1r-0ElRYV4^7mHFe~f zv$70Fx88j7)w{RffB)^T9zC45m^Dv_^^19T_)=%*iG?D~5$K(_?=1t1FvdtZA_ivE z=m5PDnIT#rGntlIRYjyI%kyP8J%9Gv{RgAr^v}Ngr+?GEC}2LBY%jDrnH?`%h{;ED z&(8W+chbhB-XCv?Z|3Lc^Owh*4H%L#02lxQAfSPfqfj_27#&0>`t<18;o&i+c<10& zIcgq0`fu`+(R4VN3|%qeq9*35@J-V=S5KSkufO}vd%yd@HG9gXNa39maqxeKARsO! 
[GIT binary patches omitted: new example images under datasets/example_images/ (rana_pipiens_s_000379.png, red_deer_s_001101.png, red_deer_s_001719.png, stallion_s_000015.png, supertanker_s_000275.png, tabby_cat_s_001983.png, tabby_s_001593.png, tabby_s_001774.png, toad_frog_s_001786.png, trailer_truck_s_001350.png, trucking_rig_s_001247.png, wagon_s_000572.png, wagtail_s_000747.png), all added as new files with mode 100644.]
diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb new file mode 100644 index 000000000..b99e98ef4 --- /dev/null +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -0,0 +1,387 @@ +{ + "cells": [
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction\n", + "\n", + "This tutorial introduces the basic Auto-PyTorch API together with the classes for featurized and image data.\n", + "So far, Auto-PyTorch covers classification and regression on featurized data as well as classification on image data.\n", + "For installing Auto-PyTorch, please refer to the github page." + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# API\n", + "\n", + "There are classes for featurized tasks (classification, multi-label classification, regression) and image tasks (classification).
You can import them via:" + ] + },
+ { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from autoPyTorch import (AutoNetClassification,\n", + " AutoNetMultilabel,\n", + " AutoNetRegression,\n", + " AutoNetImageClassification,\n", + " AutoNetImageClassificationMultipleDatasets)" + ] + },
+ { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import openml\n", + "import json" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Upon initialization of a class, you can specify its configuration. The *config_preset* allows you to constrain the search space to one of *tiny_cs, medium_cs* or *full_cs*. These presets can be seen in *core/presets/*." + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "autonet = AutoNetClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here are some useful methods provided by the API:" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Get the current configuration as dict\n", + "current_configuration = autonet.get_current_autonet_config()\n", + "\n", + "# Get the ConfigSpace object with all hyperparameters, conditions, default values and default ranges\n", + "hyperparameter_search_space = autonet.get_hyperparameter_search_space()\n", + "\n", + "# Print all possible configuration options\n", + "autonet.print_help()" + ] + },
+ { + "cell_type": "markdown", + "metadata": { + "scrolled": true + }, + "source": [ + "The most important methods for using Auto-PyTorch are **fit**, **refit**, **score** and **predict**.\n", + "\n", + "**fit** is used to search for a configuration:" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import openml\n", + "import json\n", + "\n", + "# Get some data from an openml task\n", + "task = openml.tasks.get_task(task_id=32)\n", + "X, y = task.get_X_and_y()\n", + "ind_train, ind_test = task.get_train_test_split_indices()\n", + "X_train, Y_train = X[ind_train], y[ind_train]\n", + "X_test, Y_test = X[ind_test], y[ind_test]" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Search for a configuration for 300 seconds and with 60-120 s time for fitting\n", + "# (use log_level=\"info\" or log_level=\"debug\" for more detailed output)\n", + "autonet = AutoNetClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")\n", + "results_fit = autonet.fit(X_train=X_train,\n", + " Y_train=Y_train,\n", + " validation_split=0.3,\n", + " max_runtime=300,\n", + " min_budget=60,\n", + " max_budget=120)\n", + "\n", + "# Save json\n", + "with open(\"logs/results_fit.json\", \"w\") as file:\n", + " json.dump(results_fit, file)" + ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**refit** allows you to fit a configuration of your choice for a defined time:" + ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Create an autonet, use tensorboard during fitting\n", + "autonet_config = {\n", + " \"result_logger_dir\" : \"logs/\",\n", + " \"budget_type\" : \"epochs\",\n", + " \"log_level\" :
\"info\", \n", + " \"use_tensorboard_logger\" : True\n", + " }\n", + "autonet = AutoNetClassification(**autonet_config)\n", + "\n", + "# This samples a random hyperparameter configuration as an example\n", + "hyperparameter_config = autonet.get_hyperparameter_search_space().sample_configuration().get_dictionary()\n", + "\n", + "# Refit with sampled hyperparameter config for 10 epochs\n", + "results_refit = autonet.refit(X_train=X_train,\n", + " Y_train=Y_train,\n", + " X_valid=X_test,\n", + " Y_valid=Y_test,\n", + " hyperparameter_config=hyperparameter_config,\n", + " autonet_config=autonet.get_current_autonet_config(),\n", + " budget=10)\n", + "\n", + "# Save json\n", + "with open(\"logs/results_refit.json\", \"w\") as file:\n", + " json.dump(results_refit, file)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**pred** returns the predictions of the incumbent model. **score** can be used to evaluate the model on a test set. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "score = autonet.score(X_test=X_test, Y_test=Y_test)\n", + "pred = autonet.predict(X=X_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finall, you can also get the incumbent model as PyTorch Sequential model via" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "pytorch_model = autonet.get_pytorch_model()\n", + "print(pytorch_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Featurized Data\n", + "\n", + "All classes for featurized data (*AutoNetClassification*, *AutoNetMultilabel*, *AutoNetRegression*) can be used as in the example above. The only difference is the type of labels they accept." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Image Data\n", + "\n", + "Auto-PyTorch provides two classes for image data. *autonet_image_classification* can be used for classification for images. The *autonet_multi_image_classification* class allows to search for configurations for image classification across multiple datasets. This means Auto-PyTorch will try to choose a configuration that works well on all given datasets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Load classes\n", + "autonet_image_classification = AutoNetImageClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")\n", + "autonet_multi_image_classification = AutoNetImageClassificationMultipleDatasets(config_preset=\"tiny_cs\", result_logger_dir=\"logs/\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For passing your image data to fit, you have two options:\n", + "\n", + "I) Via a path to a comma-separated value file, which contains the paths to the images and the image labels (note that the file is assumed to have no header):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "csv_dir = os.path.abspath(\"../../datasets/example.csv\")\n", + "\n", + "X_train = np.array([csv_dir])\n", + "Y_train = np.array([0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "II) By directly passing the paths to the images and the labels:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "df = pd.read_csv(csv_dir, header=None)\n", + "X_train = df.values[:,0]\n", + "Y_train = df.values[:,1]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "scrolled": false + }, + "source": [ + "Make sure you specify *image_root_folders* if the paths to the images are not specified from your current working directory. You can also specify *images_shape* to up- or downscale images.\n", + "\n", + "Using the flag *save_checkpoints=True* will save checkpoints to a specified directory:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "autonet_image_classification.fit(X_train=X_train,\n", + " Y_train=Y_train,\n", + " images_shape=[3,32,32],\n", + " min_budget=100,\n", + " max_budget=200,\n", + " max_runtime=400,\n", + " save_checkpoints=True,\n", + " images_root_folders=[os.path.abspath(\"../../datasets/example.csv\")])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Auto-PyTorch also supports some common datasets. By passing a comma-separated value file with just one line, e.g. \"CIFAR10, 0\", and specifying *default_dataset_download_dir*, it will automatically download the data and use it for searching. Supported datasets are CIFAR10, CIFAR100, SVHN and MNIST." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "path_to_cifar_csv = os.path.abspath(\"../../datasets/CIFAR10.csv\")\n", + "\n", + "autonet_image_classification.fit(X_train=np.array([path_to_cifar_csv]),\n", + " Y_train=np.array([0]),\n", + " min_budget=900,\n", + " max_budget=1200,\n", + " max_runtime=3000,\n", + " default_dataset_download_dir=\"./datasets\",\n", + " images_root_folders=[\"./datasets\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For searching across multiple datasets, pass multiple csv files to the corresponding Auto-PyTorch class. Make sure you specify *images_root_folders* for each of them." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "autonet_multi_image_classification.fit(X_train=np.array([path_to_cifar_csv, csv_dir]),\n", + " Y_train=np.array([0]),\n", + " min_budget=1500,\n", + " max_budget=2000,\n", + " max_runtime=4000,\n", + " default_dataset_download_dir=\"./datasets\",\n", + " images_root_folders=[\"./datasets\", \"./datasets\"],\n", + " log_level=\"info\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/real_data/openml_task.py b/examples/real_data/openml_task.py new file mode 100644 index 000000000..799d9267b --- /dev/null +++ b/examples/real_data/openml_task.py @@ -0,0 +1,30 @@ +import openml +from pprint import pprint +from autoPyTorch import AutoNetClassification +from sklearn.metrics import accuracy_score + + +# get OpenML task by its ID +task = openml.tasks.get_task(task_id=32) +X, y = task.get_X_and_y() +ind_train, ind_test = task.get_train_test_split_indices() + + +# run Auto-PyTorch +autoPyTorch = AutoNetClassification("tiny_cs", # config preset + log_level='info', + max_runtime=300, + min_budget=30, + max_budget=90) + +autoPyTorch.fit(X[ind_train], y[ind_train], validation_split=0.3) + + +# predict +y_pred = autoPyTorch.predict(X[ind_test]) + +print("Accuracy score", accuracy_score(y[ind_test], y_pred)) + + +# print network configuration +pprint(autoPyTorch.fit_result["optimized_hyperparameter_config"]) diff --git a/optional-requirements.txt b/optional-requirements.txt index e9b4bd0af..060175b88 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -1,4 +1,4 @@ SimpleITK openml matplotlib -tensorboard_logger \ No newline at end of file +tensorboard_logger diff --git a/requirements.txt b/requirements.txt index 6b46729cb..34c062db2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,4 +9,8 @@ scikit-learn>=0.20.0 imblearn ConfigSpace pynisher -hpbandster \ No newline at end of file +hpbandster +fasteners +torch +torchvision +tensorboard_logger diff --git a/setup.py b/setup.py index b72b0bcec..59587493c 100644 --- a/setup.py +++ b/setup.py @@ -18,7 +18,7 @@ setuptools.setup( name="autoPyTorch", - version="0.0.1", + version="0.0.2", author="AutoML Freiburg", author_email="urbanm@informatik.uni-freiburg.de", description=("Auto-PyTorch searches neural architectures using BO-HB"), From f70ebf6a882436d3bfa257dcfc0e5a421d670f08 Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Tue, 8 Oct 2019 10:10:47 +0200 Subject: [PATCH 02/13] Small bugfixes --- .gitignore | 2 +- autoPyTorch.egg-info/PKG-INFO | 17 -- autoPyTorch.egg-info/SOURCES.txt | 237 ------------------ autoPyTorch.egg-info/dependency_links.txt | 1 - autoPyTorch.egg-info/requires.txt | 16 -- autoPyTorch.egg-info/top_level.txt | 3 - .../components/metrics/standard_metrics.py | 2 +- .../components/networks/image/densenet.py | 2 + .../networks/image/densenet_flexible.py | 2 + .../components/networks/image/mobilenet.py | 2 + .../components/networks/image/resnet.py | 2 + .../components/networks/image/resnet152.py | 2 + autoPyTorch/core/api.py | 7 +- autoPyTorch/pipeline/nodes/metric_selector.py | 3 +- 
examples/basics/Auto-PyTorch Tutorial.ipynb | 195 +++++++++++++- 15 files changed, 209 insertions(+), 284 deletions(-) delete mode 100644 autoPyTorch.egg-info/PKG-INFO delete mode 100644 autoPyTorch.egg-info/SOURCES.txt delete mode 100644 autoPyTorch.egg-info/dependency_links.txt delete mode 100644 autoPyTorch.egg-info/requires.txt delete mode 100644 autoPyTorch.egg-info/top_level.txt diff --git a/.gitignore b/.gitignore index bddccd816..f0086e3ce 100644 --- a/.gitignore +++ b/.gitignore @@ -28,7 +28,7 @@ jobs.txt # Build *build/ -*autonet.egg-info +*autoPyTorch.egg-info *.simg .DS_Store dist/ diff --git a/autoPyTorch.egg-info/PKG-INFO b/autoPyTorch.egg-info/PKG-INFO deleted file mode 100644 index 4ef8bd634..000000000 --- a/autoPyTorch.egg-info/PKG-INFO +++ /dev/null @@ -1,17 +0,0 @@ -Metadata-Version: 1.2 -Name: autoPyTorch -Version: 0.0.2 -Summary: Auto-PyTorch searches neural architectures using BO-HB -Home-page: UNKNOWN -Author: AutoML Freiburg -Author-email: urbanm@informatik.uni-freiburg.de -License: 3-clause BSD -Description: UNKNOWN -Keywords: machine learning algorithm configuration hyperparameter optimization tuning neural architecture deep learning -Platform: Linux -Classifier: Development Status :: 3 - Alpha -Classifier: Topic :: Utilities -Classifier: Topic :: Scientific/Engineering -Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence -Classifier: License :: OSI Approved :: 3-clause BSD -Requires-Python: >=3 diff --git a/autoPyTorch.egg-info/SOURCES.txt b/autoPyTorch.egg-info/SOURCES.txt deleted file mode 100644 index 54a06ad13..000000000 --- a/autoPyTorch.egg-info/SOURCES.txt +++ /dev/null @@ -1,237 +0,0 @@ -README.md -setup.py -autoPyTorch/__init__.py -autoPyTorch.egg-info/PKG-INFO -autoPyTorch.egg-info/SOURCES.txt -autoPyTorch.egg-info/dependency_links.txt -autoPyTorch.egg-info/requires.txt -autoPyTorch.egg-info/top_level.txt -autoPyTorch/components/__init__.py -autoPyTorch/components/ensembles/__init__.py -autoPyTorch/components/ensembles/abstract_ensemble.py -autoPyTorch/components/ensembles/ensemble_selection.py -autoPyTorch/components/lr_scheduler/__init__.py -autoPyTorch/components/lr_scheduler/lr_schedulers.py -autoPyTorch/components/metrics/__init__.py -autoPyTorch/components/metrics/additional_logs.py -autoPyTorch/components/metrics/balanced_accuracy.py -autoPyTorch/components/metrics/pac_score.py -autoPyTorch/components/metrics/standard_metrics.py -autoPyTorch/components/networks/__init__.py -autoPyTorch/components/networks/activations.py -autoPyTorch/components/networks/base_net.py -autoPyTorch/components/networks/initialization.py -autoPyTorch/components/networks/feature/__init__.py -autoPyTorch/components/networks/feature/embedding.py -autoPyTorch/components/networks/feature/mlpnet.py -autoPyTorch/components/networks/feature/resnet.py -autoPyTorch/components/networks/feature/shapedmlpnet.py -autoPyTorch/components/networks/feature/shapedresnet.py -autoPyTorch/components/networks/image/__init__.py -autoPyTorch/components/networks/image/convnet.py -autoPyTorch/components/networks/image/densenet.py -autoPyTorch/components/networks/image/densenet_flexible.py -autoPyTorch/components/networks/image/mobilenet.py -autoPyTorch/components/networks/image/resnet.py -autoPyTorch/components/networks/image/resnet152.py -autoPyTorch/components/networks/image/darts/__init__.py -autoPyTorch/components/networks/image/darts/darts_worker.py -autoPyTorch/components/networks/image/darts/genotypes.py -autoPyTorch/components/networks/image/darts/model.py 
-autoPyTorch/components/networks/image/darts/operations.py -autoPyTorch/components/networks/image/darts/utils.py -autoPyTorch/components/networks/image/utils/__init__.py -autoPyTorch/components/networks/image/utils/conv2d_helpers.py -autoPyTorch/components/networks/image/utils/mobilenet_utils.py -autoPyTorch/components/networks/image/utils/shakedrop.py -autoPyTorch/components/networks/image/utils/shakeshakeblock.py -autoPyTorch/components/networks/image/utils/utils.py -autoPyTorch/components/optimizer/__init__.py -autoPyTorch/components/optimizer/optimizer.py -autoPyTorch/components/preprocessing/__init__.py -autoPyTorch/components/preprocessing/loss_weight_strategies.py -autoPyTorch/components/preprocessing/preprocessor_base.py -autoPyTorch/components/preprocessing/resampling_base.py -autoPyTorch/components/preprocessing/feature_preprocessing/__init__.py -autoPyTorch/components/preprocessing/feature_preprocessing/fast_ica.py -autoPyTorch/components/preprocessing/feature_preprocessing/kernel_pca.py -autoPyTorch/components/preprocessing/feature_preprocessing/kitchen_sinks.py -autoPyTorch/components/preprocessing/feature_preprocessing/nystroem.py -autoPyTorch/components/preprocessing/feature_preprocessing/polynomial_features.py -autoPyTorch/components/preprocessing/feature_preprocessing/power_transformer.py -autoPyTorch/components/preprocessing/feature_preprocessing/truncated_svd.py -autoPyTorch/components/preprocessing/image_preprocessing/__init__.py -autoPyTorch/components/preprocessing/image_preprocessing/archive.py -autoPyTorch/components/preprocessing/image_preprocessing/augmentation_transforms.py -autoPyTorch/components/preprocessing/image_preprocessing/operations.py -autoPyTorch/components/preprocessing/image_preprocessing/transforms.py -autoPyTorch/components/preprocessing/resampling/__init__.py -autoPyTorch/components/preprocessing/resampling/random.py -autoPyTorch/components/preprocessing/resampling/smote.py -autoPyTorch/components/preprocessing/resampling/target_size_strategies.py -autoPyTorch/components/regularization/__init__.py -autoPyTorch/components/regularization/mixup.py -autoPyTorch/components/regularization/shake.py -autoPyTorch/components/training/__init__.py -autoPyTorch/components/training/base_training.py -autoPyTorch/components/training/budget_types.py -autoPyTorch/components/training/early_stopping.py -autoPyTorch/components/training/lr_scheduling.py -autoPyTorch/components/training/trainer.py -autoPyTorch/components/training/image/__init__.py -autoPyTorch/components/training/image/base_training.py -autoPyTorch/components/training/image/budget_types.py -autoPyTorch/components/training/image/early_stopping.py -autoPyTorch/components/training/image/lr_scheduling.py -autoPyTorch/components/training/image/mixup.py -autoPyTorch/components/training/image/trainer.py -autoPyTorch/components/training/image/checkpoints/__init__.py -autoPyTorch/components/training/image/checkpoints/load_specific.py -autoPyTorch/components/training/image/checkpoints/save_load.py -autoPyTorch/core/__init__.py -autoPyTorch/core/api.py -autoPyTorch/core/ensemble.py -autoPyTorch/core/worker.py -autoPyTorch/core/worker_no_timelimit.py -autoPyTorch/core/autonet_classes/__init__.py -autoPyTorch/core/autonet_classes/autonet_feature_classification.py -autoPyTorch/core/autonet_classes/autonet_feature_data.py -autoPyTorch/core/autonet_classes/autonet_feature_multilabel.py -autoPyTorch/core/autonet_classes/autonet_feature_regression.py -autoPyTorch/core/autonet_classes/autonet_image_classification.py 
-autoPyTorch/core/autonet_classes/autonet_image_classification_multiple_datasets.py -autoPyTorch/core/autonet_classes/autonet_image_data.py -autoPyTorch/core/hpbandster_extensions/__init__.py -autoPyTorch/core/hpbandster_extensions/bohb_ext.py -autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py -autoPyTorch/core/hpbandster_extensions/hyperband_ext.py -autoPyTorch/core/hpbandster_extensions/run_with_time.py -autoPyTorch/core/presets/__init__.py -autoPyTorch/core/presets/tiny_cs_updates.txt -autoPyTorch/core/presets/feature_classification/__init__.py -autoPyTorch/core/presets/feature_classification/full_cs.txt -autoPyTorch/core/presets/feature_classification/medium_cs.txt -autoPyTorch/core/presets/feature_classification/tiny_cs.txt -autoPyTorch/core/presets/feature_multilabel/__init__.py -autoPyTorch/core/presets/feature_multilabel/full_cs.txt -autoPyTorch/core/presets/feature_multilabel/medium_cs.txt -autoPyTorch/core/presets/feature_multilabel/tiny_cs.txt -autoPyTorch/core/presets/feature_regression/__init__.py -autoPyTorch/core/presets/feature_regression/full_cs.txt -autoPyTorch/core/presets/feature_regression/medium_cs.txt -autoPyTorch/core/presets/feature_regression/tiny_cs.txt -autoPyTorch/core/presets/image_classification/__init__.py -autoPyTorch/core/presets/image_classification/full_cs.txt -autoPyTorch/core/presets/image_classification/medium_cs.txt -autoPyTorch/core/presets/image_classification/tiny_cs.txt -autoPyTorch/core/presets/image_classification_multiple_datasets/__init__.py -autoPyTorch/core/presets/image_classification_multiple_datasets/full_cs.txt -autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt -autoPyTorch/core/presets/image_classification_multiple_datasets/tiny_cs.txt -autoPyTorch/data_management/__init__.py -autoPyTorch/data_management/data_converter.py -autoPyTorch/data_management/data_loader.py -autoPyTorch/data_management/data_manager.py -autoPyTorch/data_management/data_reader.py -autoPyTorch/data_management/image_loader.py -autoPyTorch/pipeline/__init__.py -autoPyTorch/pipeline/base/__init__.py -autoPyTorch/pipeline/base/node.py -autoPyTorch/pipeline/base/pipeline.py -autoPyTorch/pipeline/base/pipeline_node.py -autoPyTorch/pipeline/base/sub_pipeline_node.py -autoPyTorch/pipeline/nodes/__init__.py -autoPyTorch/pipeline/nodes/autonet_settings.py -autoPyTorch/pipeline/nodes/create_dataloader.py -autoPyTorch/pipeline/nodes/create_dataset_info.py -autoPyTorch/pipeline/nodes/cross_validation.py -autoPyTorch/pipeline/nodes/embedding_selector.py -autoPyTorch/pipeline/nodes/ensemble.py -autoPyTorch/pipeline/nodes/imputation.py -autoPyTorch/pipeline/nodes/initialization_selector.py -autoPyTorch/pipeline/nodes/log_functions_selector.py -autoPyTorch/pipeline/nodes/loss_module_selector.py -autoPyTorch/pipeline/nodes/lr_scheduler_selector.py -autoPyTorch/pipeline/nodes/metric_selector.py -autoPyTorch/pipeline/nodes/network_selector.py -autoPyTorch/pipeline/nodes/normalization_strategy_selector.py -autoPyTorch/pipeline/nodes/one_hot_encoding.py -autoPyTorch/pipeline/nodes/optimization_algorithm.py -autoPyTorch/pipeline/nodes/optimizer_selector.py -autoPyTorch/pipeline/nodes/preprocessor_selector.py -autoPyTorch/pipeline/nodes/resampling_strategy_selector.py -autoPyTorch/pipeline/nodes/train_node.py -autoPyTorch/pipeline/nodes/image/__init__.py -autoPyTorch/pipeline/nodes/image/autonet_settings_no_shuffle.py -autoPyTorch/pipeline/nodes/image/create_dataset_info.py -autoPyTorch/pipeline/nodes/image/create_image_dataloader.py 
-autoPyTorch/pipeline/nodes/image/cross_validation_indices.py -autoPyTorch/pipeline/nodes/image/image_augmentation.py -autoPyTorch/pipeline/nodes/image/image_dataset_reader.py -autoPyTorch/pipeline/nodes/image/loss_module_selector_indices.py -autoPyTorch/pipeline/nodes/image/multiple_datasets.py -autoPyTorch/pipeline/nodes/image/network_selector_datasetinfo.py -autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py -autoPyTorch/pipeline/nodes/image/simple_scheduler_selector.py -autoPyTorch/pipeline/nodes/image/simple_train_node.py -autoPyTorch/pipeline/nodes/image/single_dataset.py -autoPyTorch/utils/__init__.py -autoPyTorch/utils/config_space_hyperparameter.py -autoPyTorch/utils/configspace_wrapper.py -autoPyTorch/utils/ensemble.py -autoPyTorch/utils/hyperparameter_search_space_update.py -autoPyTorch/utils/loggers.py -autoPyTorch/utils/mem_test_thread.py -autoPyTorch/utils/modify_config_space.py -autoPyTorch/utils/modules.py -autoPyTorch/utils/thread_read_write.py -autoPyTorch/utils/benchmarking/__init__.py -autoPyTorch/utils/benchmarking/benchmark.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/__init__.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/apply_user_updates.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/benchmark_settings.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/create_autonet.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/fit_autonet.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/for_instance.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/for_run.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/prepare_result_folder.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/read_instance_data.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/save_ensemble_logs.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/save_results.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/set_autonet_config.py -autoPyTorch/utils/benchmarking/benchmark_pipeline/set_ensemble_config.py -autoPyTorch/utils/benchmarking/visualization_pipeline/__init__.py -autoPyTorch/utils/benchmarking/visualization_pipeline/collect_trajectories.py -autoPyTorch/utils/benchmarking/visualization_pipeline/get_additional_trajectories.py -autoPyTorch/utils/benchmarking/visualization_pipeline/get_ensemble_trajectories.py -autoPyTorch/utils/benchmarking/visualization_pipeline/get_run_trajectories.py -autoPyTorch/utils/benchmarking/visualization_pipeline/plot_summary.py -autoPyTorch/utils/benchmarking/visualization_pipeline/plot_trajectories.py -autoPyTorch/utils/benchmarking/visualization_pipeline/read_instance_info.py -autoPyTorch/utils/benchmarking/visualization_pipeline/visualization_settings.py -autoPyTorch/utils/config/__init__.py -autoPyTorch/utils/config/config_condition.py -autoPyTorch/utils/config/config_file_parser.py -autoPyTorch/utils/config/config_option.py -examples/__init__.py -examples/basics/__init__.py -examples/basics/autonet_tutorial.py -examples/basics/classification.py -examples/basics/ensemble.py -examples/basics/modify_pipeline.py -examples/basics/regression.py -test/__init__.py -test/test_pipeline/__init__.py -test/test_pipeline/test_cross_validation.py -test/test_pipeline/test_imputation.py -test/test_pipeline/test_initialization.py -test/test_pipeline/test_log_selector.py -test/test_pipeline/test_loss_selector.py -test/test_pipeline/test_lr_scheduler_selector.py -test/test_pipeline/test_metric_selector.py 
-test/test_pipeline/test_network_selector.py -test/test_pipeline/test_normalization_strategy_selector.py -test/test_pipeline/test_optimization_algorithm.py -test/test_pipeline/test_optimizer_selector.py -test/test_pipeline/test_resampling_strategy_selector.py \ No newline at end of file diff --git a/autoPyTorch.egg-info/dependency_links.txt b/autoPyTorch.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/autoPyTorch.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/autoPyTorch.egg-info/requires.txt b/autoPyTorch.egg-info/requires.txt deleted file mode 100644 index 34c062db2..000000000 --- a/autoPyTorch.egg-info/requires.txt +++ /dev/null @@ -1,16 +0,0 @@ -setuptools -Cython -netifaces -numpy -pandas -scipy -statsmodels -scikit-learn>=0.20.0 -imblearn -ConfigSpace -pynisher -hpbandster -fasteners -torch -torchvision -tensorboard_logger diff --git a/autoPyTorch.egg-info/top_level.txt b/autoPyTorch.egg-info/top_level.txt deleted file mode 100644 index d7d64695f..000000000 --- a/autoPyTorch.egg-info/top_level.txt +++ /dev/null @@ -1,3 +0,0 @@ -autoPyTorch -examples -test diff --git a/autoPyTorch/components/metrics/standard_metrics.py b/autoPyTorch/components/metrics/standard_metrics.py index f711c24a1..d9ed2b7a9 100644 --- a/autoPyTorch/components/metrics/standard_metrics.py +++ b/autoPyTorch/components/metrics/standard_metrics.py @@ -3,7 +3,7 @@ # classification metrics def accuracy(y_true, y_pred): - return np.mean(y_true == y_pred) + return np.mean(y_true == y_pred) * 100 def auc_metric(y_true, y_pred): return (2 * metrics.roc_auc_score(y_true, y_pred) - 1) diff --git a/autoPyTorch/components/networks/image/densenet.py b/autoPyTorch/components/networks/image/densenet.py index f0c406cb3..2931be7c0 100644 --- a/autoPyTorch/components/networks/image/densenet.py +++ b/autoPyTorch/components/networks/image/densenet.py @@ -121,6 +121,8 @@ def __init__(self, config, in_features, out_features, final_activation, *args, * elif isinstance(m, nn.Linear): nn.init.constant_(m.bias, 0) + self.layers = nn.Sequential(self.features) + def forward(self, x): features = self.features(x) out = F.relu(features, inplace=True) diff --git a/autoPyTorch/components/networks/image/densenet_flexible.py b/autoPyTorch/components/networks/image/densenet_flexible.py index 6f8f13969..ab766aac5 100644 --- a/autoPyTorch/components/networks/image/densenet_flexible.py +++ b/autoPyTorch/components/networks/image/densenet_flexible.py @@ -155,6 +155,8 @@ def __init__(self, config, in_features, out_features, final_activation, *args, * self.matrix_init(m.bias, config['linear_bias_init']) # logger.debug(print(self)) + + self.layers = nn.Sequential(self.features) def matrix_init(self, matrix, init_type): if init_type == 'kaiming_normal': diff --git a/autoPyTorch/components/networks/image/mobilenet.py b/autoPyTorch/components/networks/image/mobilenet.py index a2190b1a3..7d5a888de 100644 --- a/autoPyTorch/components/networks/image/mobilenet.py +++ b/autoPyTorch/components/networks/image/mobilenet.py @@ -194,6 +194,8 @@ def _cfg(url='', **kwargs): self.model.default_cfg = _cfg(url='', input_size=in_features, pool_size=(10, 10), crop_pct=0.904, num_classes=out_features) + self.layers = nn.Sequential(self.model.forward_features) + def forward(self, x): # make sure channels first x = self.model(x) diff --git a/autoPyTorch/components/networks/image/resnet.py b/autoPyTorch/components/networks/image/resnet.py index 27981911b..c709f7a3d 100644 --- 
a/autoPyTorch/components/networks/image/resnet.py +++ b/autoPyTorch/components/networks/image/resnet.py @@ -242,6 +242,8 @@ def __init__(self, config, in_features, out_features, final_activation, **kwargs self.apply(initialize_weights) + self.layers = nn.Sequential(self.model) + def forward(self, x): x = self.model(x) x = x.view(-1, self.feature_maps_out) diff --git a/autoPyTorch/components/networks/image/resnet152.py b/autoPyTorch/components/networks/image/resnet152.py index 68748ad75..22dc6b476 100644 --- a/autoPyTorch/components/networks/image/resnet152.py +++ b/autoPyTorch/components/networks/image/resnet152.py @@ -131,6 +131,8 @@ def __init__(self, block, layers, num_classes=1000, zero_init_residual=False): elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) + self.layers = nn.Sequential(self.conv1, self.bn1, self.relu, self.maxpool, self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool) + def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: diff --git a/autoPyTorch/core/api.py b/autoPyTorch/core/api.py index 92c8d1da0..0b1e55bdc 100644 --- a/autoPyTorch/core/api.py +++ b/autoPyTorch/core/api.py @@ -9,6 +9,7 @@ import torch.nn as nn import copy import os +import json from autoPyTorch.pipeline.base.pipeline import Pipeline @@ -265,7 +266,7 @@ def score(self, X_test, Y_test, return_loss_value=False): if return_loss_value: return metric.get_loss_value(Y_pred, Y_test) - return metric(torch.from_numpy(Y_test.astype(np.float32)), torch.from_numpy(Y_pred.astype(np.float32))) + return metric(torch.from_numpy(Y_pred.astype(np.float32)), torch.from_numpy(Y_test.astype(np.float32))) def get_pytorch_model(self): """Returns a pytorch sequential model of the current incumbent configuration @@ -281,7 +282,7 @@ def get_pytorch_model(self): return self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"].layers def initialize_from_checkpoint(self, hyperparameter_config, checkpoint, in_features, out_features, final_activation=None): - """Returns a pytorch sequential model from a state dict and a hyperparamter config. 
+ """ Arguments: config_file: json with output as from .fit method @@ -322,7 +323,7 @@ def initialize_from_checkpoint(self, hyperparameter_config, checkpoint, in_featu # Add to pipeline self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"] = model - return network.layers + return model def check_data_array_types(self, *arrays): result = [] diff --git a/autoPyTorch/pipeline/nodes/metric_selector.py b/autoPyTorch/pipeline/nodes/metric_selector.py index 3935a6d9d..24ff72bb7 100644 --- a/autoPyTorch/pipeline/nodes/metric_selector.py +++ b/autoPyTorch/pipeline/nodes/metric_selector.py @@ -96,8 +96,9 @@ def __call__(self, Y_pred, Y_true): Y_pred = ensure_numpy(Y_pred) Y_true = ensure_numpy(Y_true) - if len(Y_pred.shape) > len(Y_true.shape): + if len(Y_pred.shape) != len(Y_true.shape): Y_pred = undo_ohe(Y_pred) + Y_true = undo_ohe(Y_true) return self.metric(self.ohe_transform(Y_true), self.ohe_transform(Y_pred)) def get_loss_value(self, Y_pred, Y_true): diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index b99e98ef4..c35c6f340 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -53,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -69,9 +69,196 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Configure AutoNet with the following keyword arguments.\n", + "Pass these arguments to either the constructor or fit().\n", + "\n", + "name default choices type \n", + "===============================================================================================================================================\n", + "additional_logs [] [] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "additional_metrics [] [accuracy, \n", + " auc_metric, \n", + " pac_metric, \n", + " balanced_accuracy] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "algorithm bohb [bohb, \n", + " hyperband] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "batch_loss_computation_techniques [standard, [standard, \n", + " mixup] mixup] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "best_over_epochs False [True, \n", + " False] \n", + "\tinfo: Whether to report the best performance occurred to BOHB\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "budget_type time [time, \n", + " epochs, \n", + " training_time] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "categorical_features None None \n", + "\tinfo: List of booleans that specifies for each feature whether it is categorical.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + 
"cross_validator none dict_keys(['none', 'k_fold', 'stratified \n", + "\tinfo: Class inheriting from sklearn.model_selection.BaseCrossValidator. Ignored if validation data is given.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "cross_validator_args {} None \n", + "\tinfo: Args of cross validator. \n", + "\t\tNote that random_state and shuffle are set by pipeline config options random_seed and shuffle, if not specified here.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "cuda True [True, \n", + " False] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "dataset_name None None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "early_stopping_patience inf None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "early_stopping_reset_parameters False None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "embeddings [none, [none, \n", + " learned] learned] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "eta 3 None \n", + "\tinfo: eta parameter of Hyperband.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "final_activation softmax [softmax] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "full_eval_each_epoch False [True, \n", + " False] \n", + "\tinfo: Whether to evaluate everything every epoch. 
Results in more useful output\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "hyperparameter_search_space_updates None None [directory, \n", + " \n", + " median, median, \n", + " most_frequent] most_frequent] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "initialization_methods [default, [default, \n", + " sparse] sparse] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "initializer simple_initializer [simple_initializer] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "log_level warning [debug, \n", + " info, \n", + " warning, \n", + " error, \n", + " critical] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "loss_modules [cross_entropy, [cross_entropy, \n", + " cross_entropy_weighted] cross_entropy_weighted] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "lr_scheduler [cosine_annealing, [cosine_annealing, \n", + " cyclic, cyclic, \n", + " exponential, exponential, \n", + " step, step, \n", + " adapt, adapt, \n", + " plateau, plateau, \n", + " alternating_cosine, alternating_cosine, \n", + " none] none] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "max_budget 6000 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "max_runtime 24000 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "memory_limit_mb 1000000 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "min_budget 120 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "min_budget_for_cv 0 None \n", + "\tinfo: Specify minimum budget for cv. 
If budget is smaller use specified validation split.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "min_workers 1 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "network_interface_name eth0 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "networks [mlpnet, [mlpnet, \n", + " shapedmlpnet, shapedmlpnet, \n", + " resnet, resnet, \n", + " shapedresnet] shapedresnet] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "normalization_strategies [none, [none, \n", + " minmax, minmax, \n", + " standardize, standardize, \n", + " maxabs] maxabs] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "num_iterations inf None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "optimize_metric accuracy [accuracy, \n", + " auc_metric, \n", + " pac_metric, \n", + " balanced_accuracy] \n", + "\tinfo: This is the meta train metric BOHB will try to optimize.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "optimizer [adam, [adam, \n", + " adamw, adamw, \n", + " sgd, sgd, \n", + " rmsprop] rmsprop] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "over_sampling_methods [none, [none, \n", + " random, random, \n", + " smote] smote] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "preprocessors [none, [none, \n", + " truncated_svd, truncated_svd, \n", + " power_transformer, power_transformer, \n", + " fast_ica, fast_ica, \n", + " kitchen_sinks, kitchen_sinks, \n", + " kernel_pca, kernel_pca, \n", + " nystroem] nystroem] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "random_seed 3478936497 None \n", + "\tinfo: Make sure to specify the same seed for all workers.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "refit_validation_split 0.0 [0, \n", + " 1] \n", + "\tinfo: In range [0, 1). 
Part of train dataset used for validation in refit.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "result_logger_dir logs/ None directory \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "run_id 0 None \n", + "\tinfo: Unique id for each run.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "run_worker_on_master_node True None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "shuffle True [True, \n", + " False] \n", + "\tinfo: Shuffle train and validation set\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "target_size_strategies [none, [none, \n", + " upsample, upsample, \n", + " downsample, downsample, \n", + " average, average, \n", + " median] median] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "task_id -1 None \n", + "\tinfo: ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "torch_num_threads 1 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "under_sampling_methods [none, [none, \n", + " random] random] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "use_pynisher True None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "use_tensorboard_logger False None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "validation_split 0.3 [0, \n", + " 1] \n", + "\tinfo: In range [0, 1). Part of train dataset used for validation. Ignored in fit if cross validator or valid data given.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "working_dir . 
None directory \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n" + ] + } + ], "source": [ "# Get the current configuration as dict\n", "current_configuration = autonet.get_current_autonet_config()\n", @@ -96,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "scrolled": true }, From a54c9b5bc39c6adf74a8e8ad2134f43632c1abbf Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Tue, 8 Oct 2019 10:43:06 +0200 Subject: [PATCH 03/13] Moved logs from "info" to "debug" --- .../hpbandster_extensions/run_with_time.py | 4 ++-- .../optimization_algorithm_no_timelimit.py | 8 +++---- examples/basics/Auto-PyTorch Tutorial.ipynb | 22 +++++++++---------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/autoPyTorch/core/hpbandster_extensions/run_with_time.py b/autoPyTorch/core/hpbandster_extensions/run_with_time.py index 32bd4518a..68e8a1e38 100644 --- a/autoPyTorch/core/hpbandster_extensions/run_with_time.py +++ b/autoPyTorch/core/hpbandster_extensions/run_with_time.py @@ -77,12 +77,12 @@ def run_with_time(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, i n_canceled += 1 break - self.logger.info('HBMASTER: Canceled %i remaining runs'%n_canceled) + self.logger.debug('HBMASTER: Canceled %i remaining runs'%n_canceled) # wait for remaining jobs while self.num_running_jobs > 0: self.thread_cond.wait(60) - self.logger.info('HBMASTER: Job finished: wait for remaining %i jobs'%self.num_running_jobs) + self.logger.debug('HBMASTER: Job finished: wait for remaining %i jobs'%self.num_running_jobs) self.thread_cond.release() diff --git a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py index fd7c01165..0041ec1bf 100644 --- a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py +++ b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py @@ -95,8 +95,8 @@ def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, refit=None): # Get permutations self.permutations = self.get_permutations(n_budgets) - self.logger.info('BOHB-ConfigSpace:\n' + str(config_space)) - self.logger.info('Constant Hyperparameter:\n' + str(pprint.pformat(constants))) + self.logger.debug('BOHB-ConfigSpace:\n' + str(config_space)) + self.logger.debug('Constant Hyperparameter:\n' + str(pprint.pformat(constants))) run_id, task_id = pipeline_config['run_id'], pipeline_config['task_id'] @@ -284,7 +284,7 @@ def run_optimization_algorithm(self, pipeline_config, config_space, constant_hyp reduce_runtime = pipeline_config["max_budget"] if pipeline_config["budget_type"] == "time" else 0 HB.wait_for_workers(min_num_workers) - self.logger.info('Workers are ready!') + self.logger.debug('Workers are ready!') thread_read_write.append('runs.log', "{0}: {1} | {2}-{3}\n".format( str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")), @@ -314,7 +314,7 @@ def run_refit(self, pipeline_config, refit, constants, X_train, Y_train, X_valid full_config.update(constants) full_config.update(refit["hyperparameter_config"]) - self.logger.info('Refit-Config:\n' + str(pprint.pformat(full_config))) + self.logger.debug('Refit-Config:\n' + str(pprint.pformat(full_config))) class Job(): pass diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index c35c6f340..4ace093e1 100644 --- a/examples/basics/Auto-PyTorch 
Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -94,7 +94,7 @@ "batch_loss_computation_techniques [standard, [standard, \n", " mixup] mixup] \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "best_over_epochs False [True, \n", + "best_over_epochs False [True, \n", " False] \n", "\tinfo: Whether to report the best performance occurred to BOHB\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", @@ -102,24 +102,24 @@ " epochs, \n", " training_time] \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "categorical_features None None \n", + "categorical_features None None \n", "\tinfo: List of booleans that specifies for each feature whether it is categorical.\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "cross_validator none dict_keys(['none', 'k_fold', 'stratified \n", "\tinfo: Class inheriting from sklearn.model_selection.BaseCrossValidator. Ignored if validation data is given.\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cross_validator_args {} None \n", + "cross_validator_args {} None \n", "\tinfo: Args of cross validator. \n", "\t\tNote that random_state and shuffle are set by pipeline config options random_seed and shuffle, if not specified here.\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cuda True [True, \n", + "cuda True [True, \n", " False] \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "dataset_name None None \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "early_stopping_patience inf None \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "early_stopping_reset_parameters False None \n", + "early_stopping_reset_parameters False None \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "embeddings [none, [none, \n", " learned] learned] \n", @@ -129,7 +129,7 @@ "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "final_activation softmax [softmax] \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "full_eval_each_epoch False [True, \n", + "full_eval_each_epoch False [True, \n", " False] \n", "\tinfo: Whether to evaluate everything every epoch. 
Results in more useful output\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", @@ -214,7 +214,7 @@ " kernel_pca, kernel_pca, \n", " nystroem] nystroem] \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "random_seed 3478936497 None \n", + "random_seed 137405662 None \n", "\tinfo: Make sure to specify the same seed for all workers.\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "refit_validation_split 0.0 [0, \n", @@ -226,9 +226,9 @@ "run_id 0 None \n", "\tinfo: Unique id for each run.\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "run_worker_on_master_node True None \n", + "run_worker_on_master_node True None \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "shuffle True [True, \n", + "shuffle True [True, \n", " False] \n", "\tinfo: Shuffle train and validation set\n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", @@ -246,9 +246,9 @@ "under_sampling_methods [none, [none, \n", " random] random] \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "use_pynisher True None \n", + "use_pynisher True None \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "use_tensorboard_logger False None \n", + "use_tensorboard_logger False None \n", "-----------------------------------------------------------------------------------------------------------------------------------------------\n", "validation_split 0.3 [0, \n", " 1] \n", From 62412cd53ed315e960838021303415678359991a Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Tue, 8 Oct 2019 14:09:49 +0200 Subject: [PATCH 04/13] Fixing maximum amount of datasets to 10 --- .../nodes/image/optimization_algorithm_no_timelimit.py | 2 +- examples/basics/Auto-PyTorch Tutorial.ipynb | 6 +++--- optional-requirements.txt | 2 -- requirements.txt | 1 + 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py index 0041ec1bf..a563b3f44 100644 --- a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py +++ b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py @@ -82,7 +82,7 @@ def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, refit=None): config_space, constants = remove_constant_hyperparameter(config_space) config_space.seed(pipeline_config['random_seed']) - self.n_datasets = X_train.shape[0] if X_train.shape[0]<100 else 1 + self.n_datasets = X_train.shape[0] if X_train.shape[0]<10 else 1 #Get number of budgets max_budget = pipeline_config["max_budget"] diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index 4ace093e1..90f022b7c 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb 
+++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -481,7 +481,7 @@ "source": [ "Make sure you specify *image_root_folders* if the paths to the images are not specified from your current working directory. You can also specify *images_shape* to up- or downscale images.\n", "\n", - "Using the flag *save_checkpoints=True* will save checkpoints to a specified directory:" + "Using the flag *save_checkpoints=True* will save checkpoints to the result directory:" ] }, { @@ -499,7 +499,7 @@ " max_budget=200,\n", " max_runtime=400,\n", " save_checkpoints=True,\n", - " images_root_folders=[os.path.abspath(\"../../datasets/example.csv\")])" + " images_root_folders=[os.path.abspath(\"../../datasets/example_images\")])" ] }, { @@ -545,7 +545,7 @@ " max_budget=2000,\n", " max_runtime=4000,\n", " default_dataset_download_dir=\"./datasets\",\n", - " images_root_folders=[\"./datasets\", \"./datasets\"],\n", + " images_root_folders=[\"./datasets\", \"./datasets/example_images\"],\n", " log_level=\"info\")" ] } diff --git a/optional-requirements.txt b/optional-requirements.txt index 060175b88..400f72608 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -1,4 +1,2 @@ SimpleITK -openml matplotlib -tensorboard_logger diff --git a/requirements.txt b/requirements.txt index 34c062db2..eaa9db14d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,3 +14,4 @@ fasteners torch torchvision tensorboard_logger +openml From a1ea8a4f61b559966b3a380018ea851f759471fb Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Tue, 8 Oct 2019 15:37:13 +0200 Subject: [PATCH 05/13] Clarifications --- examples/basics/Auto-PyTorch Tutorial.ipynb | 230 +++----------------- 1 file changed, 28 insertions(+), 202 deletions(-) diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index 90f022b7c..5d3839102 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -8,7 +8,9 @@ "\n", "This tutorial introduces the basic Auto-PyTorch API together with the classes for featurized and image data.\n", "So far, Auto-PyTorch covers classification and regression on featurized data as well as classification on image data.\n", - "For installing Auto-PyTorch, please refer to the github page." + "For installing Auto-PyTorch, please refer to the github page.\n", + "\n", + "**Note**: In this notebook data will be downloaded from the openml project for featurized tasks and CIFAR10 will be downloaded for image classification. Hence, an internet connection is required." 
] }, { @@ -22,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -35,7 +37,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -53,7 +55,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -69,196 +71,9 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Configure AutoNet with the following keyword arguments.\n", - "Pass these arguments to either the constructor or fit().\n", - "\n", - "name default choices type \n", - "===============================================================================================================================================\n", - "additional_logs [] [] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "additional_metrics [] [accuracy, \n", - " auc_metric, \n", - " pac_metric, \n", - " balanced_accuracy] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "algorithm bohb [bohb, \n", - " hyperband] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "batch_loss_computation_techniques [standard, [standard, \n", - " mixup] mixup] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "best_over_epochs False [True, \n", - " False] \n", - "\tinfo: Whether to report the best performance occurred to BOHB\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "budget_type time [time, \n", - " epochs, \n", - " training_time] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "categorical_features None None \n", - "\tinfo: List of booleans that specifies for each feature whether it is categorical.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cross_validator none dict_keys(['none', 'k_fold', 'stratified \n", - "\tinfo: Class inheriting from sklearn.model_selection.BaseCrossValidator. Ignored if validation data is given.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cross_validator_args {} None \n", - "\tinfo: Args of cross validator. 
\n", - "\t\tNote that random_state and shuffle are set by pipeline config options random_seed and shuffle, if not specified here.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cuda True [True, \n", - " False] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "dataset_name None None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "early_stopping_patience inf None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "early_stopping_reset_parameters False None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "embeddings [none, [none, \n", - " learned] learned] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "eta 3 None \n", - "\tinfo: eta parameter of Hyperband.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "final_activation softmax [softmax] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "full_eval_each_epoch False [True, \n", - " False] \n", - "\tinfo: Whether to evaluate everything every epoch. 
Results in more useful output\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "hyperparameter_search_space_updates None None [directory, \n", - " \n", - " median, median, \n", - " most_frequent] most_frequent] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "initialization_methods [default, [default, \n", - " sparse] sparse] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "initializer simple_initializer [simple_initializer] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "log_level warning [debug, \n", - " info, \n", - " warning, \n", - " error, \n", - " critical] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "loss_modules [cross_entropy, [cross_entropy, \n", - " cross_entropy_weighted] cross_entropy_weighted] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "lr_scheduler [cosine_annealing, [cosine_annealing, \n", - " cyclic, cyclic, \n", - " exponential, exponential, \n", - " step, step, \n", - " adapt, adapt, \n", - " plateau, plateau, \n", - " alternating_cosine, alternating_cosine, \n", - " none] none] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "max_budget 6000 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "max_runtime 24000 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "memory_limit_mb 1000000 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "min_budget 120 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "min_budget_for_cv 0 None \n", - "\tinfo: Specify minimum budget for cv. 
If budget is smaller use specified validation split.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "min_workers 1 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "network_interface_name eth0 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "networks [mlpnet, [mlpnet, \n", - " shapedmlpnet, shapedmlpnet, \n", - " resnet, resnet, \n", - " shapedresnet] shapedresnet] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "normalization_strategies [none, [none, \n", - " minmax, minmax, \n", - " standardize, standardize, \n", - " maxabs] maxabs] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "num_iterations inf None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "optimize_metric accuracy [accuracy, \n", - " auc_metric, \n", - " pac_metric, \n", - " balanced_accuracy] \n", - "\tinfo: This is the meta train metric BOHB will try to optimize.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "optimizer [adam, [adam, \n", - " adamw, adamw, \n", - " sgd, sgd, \n", - " rmsprop] rmsprop] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "over_sampling_methods [none, [none, \n", - " random, random, \n", - " smote] smote] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "preprocessors [none, [none, \n", - " truncated_svd, truncated_svd, \n", - " power_transformer, power_transformer, \n", - " fast_ica, fast_ica, \n", - " kitchen_sinks, kitchen_sinks, \n", - " kernel_pca, kernel_pca, \n", - " nystroem] nystroem] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "random_seed 137405662 None \n", - "\tinfo: Make sure to specify the same seed for all workers.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "refit_validation_split 0.0 [0, \n", - " 1] \n", - "\tinfo: In range [0, 1). 
Part of train dataset used for validation in refit.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "result_logger_dir logs/ None directory \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "run_id 0 None \n", - "\tinfo: Unique id for each run.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "run_worker_on_master_node True None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "shuffle True [True, \n", - " False] \n", - "\tinfo: Shuffle train and validation set\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "target_size_strategies [none, [none, \n", - " upsample, upsample, \n", - " downsample, downsample, \n", - " average, average, \n", - " median] median] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "task_id -1 None \n", - "\tinfo: ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "torch_num_threads 1 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "under_sampling_methods [none, [none, \n", - " random] random] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "use_pynisher True None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "use_tensorboard_logger False None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "validation_split 0.3 [0, \n", - " 1] \n", - "\tinfo: In range [0, 1). Part of train dataset used for validation. Ignored in fit if cross validator or valid data given.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "working_dir . 
None directory \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n" - ] - } - ], + "outputs": [], "source": [ "# Get the current configuration as dict\n", "current_configuration = autonet.get_current_autonet_config()\n", @@ -283,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "scrolled": true }, @@ -292,15 +107,25 @@ "import numpy as np\n", "import openml\n", "import json\n", + "from sklearn.model_selection import train_test_split\n", "\n", - "# Get some data from an openml task\n", - "task = openml.tasks.get_task(task_id=32)\n", + "# Get data from the openml task \"Supervised Classification on credit-g (https://www.openml.org/t/31)\"\n", + "task = openml.tasks.get_task(task_id=31)\n", "X, y = task.get_X_and_y()\n", "ind_train, ind_test = task.get_train_test_split_indices()\n", "X_train, Y_train = X[ind_train], y[ind_train]\n", "X_test, Y_test = X[ind_test], y[ind_test]" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Search for a configuration for 300 seconds and with 60-120 s time for fitting.\n", + "Use the validation_split parameter to specify a split size. You can also pass your own validation set\n", + "via X_val and Y_val. Use log_level=\"info\" or log_level=\"debug\" for more detailed output." + ] + }, { "cell_type": "code", "execution_count": null, @@ -309,8 +134,6 @@ }, "outputs": [], "source": [ - "# Search for a configuration for 300 seconds and with 60-120 s time for fitting\n", - "# (use log_level=\"info\" or log_level=\"debug\" for more detailed output)\n", "autonet = AutoNetClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")\n", "results_fit = autonet.fit(X_train=X_train,\n", " Y_train=Y_train,\n", @@ -351,11 +174,11 @@ "# This samples a random hyperparameter configuration as an example\n", "hyperparameter_config = autonet.get_hyperparameter_search_space().sample_configuration().get_dictionary()\n", "\n", - "# Refit with sampled hyperparameter config for 10 epochs\n", + "# Refit with sampled hyperparameter config for 10 epochs. This time on the full dataset.\n", "results_refit = autonet.refit(X_train=X_train,\n", " Y_train=Y_train,\n", - " X_valid=X_test,\n", - " Y_valid=Y_test,\n", + " X_valid=None,\n", + " Y_valid=None,\n", " hyperparameter_config=hyperparameter_config,\n", " autonet_config=autonet.get_current_autonet_config(),\n", " budget=10)\n", @@ -379,7 +202,10 @@ "outputs": [], "source": [ "score = autonet.score(X_test=X_test, Y_test=Y_test)\n", - "pred = autonet.predict(X=X_test)" + "pred = autonet.predict(X=X_test)\n", + "\n", + "print(\"Model prediction:\", pred)\n", + "print(\"Accuracy score\", score)" ] }, { From c0451c3824bef8c5eac961f41dc0dad58d25d455 Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Tue, 8 Oct 2019 16:58:53 +0200 Subject: [PATCH 06/13] Clarifications in the tutorial --- examples/basics/Auto-PyTorch Tutorial.ipynb | 27 ++++++++++----------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index 5d3839102..955945e24 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -50,7 +50,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Upon initialization of a class, you can specify its configuration. 
The *config_preset* allows to constrain the search space to one of *tiny_cs, medium_cs* or *full_cs*. These presets can be seen in *core/presets/*." + "Upon initialization of a class, you can specify its configuration. Later, you can override its configuration in each fit call. The *config_preset* allows to constrain the search space to one of *tiny_cs, medium_cs* or *full_cs*. These presets can be seen in *core/presets/*." ] }, { @@ -93,7 +93,7 @@ "source": [ "The most important methods for using Auto-PyTorch are **fit**, **refit**, **score** and **predict**.\n", "\n", - "**fit** is used to search for a configuration:" + "**fit** is used to search for a good configuration by fitting different parameter configurations. The incumbent configuration is then returned and stored in the class:" ] }, { @@ -151,21 +151,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**refit** allows you to fit a configuration of your choice for a defined time:" + "**refit** allows you to fit a configuration of your choice for a defined time. You can specify a hyperparameter configuration to fit (if you do not specify a configuration the incumbent configuration from the last fit call will be used):" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": true - }, + "metadata": {}, "outputs": [], "source": [ "# Create an autonet, use tensorboard during fitting\n", "autonet_config = {\n", " \"result_logger_dir\" : \"logs/\",\n", - " \"budget_type\" : \"epochs\",\n", + " \"budget_type\" : \"time\",\n", " \"log_level\" : \"info\", \n", " \"use_tensorboard_logger\" : True\n", " }\n", @@ -174,14 +172,14 @@ "# This samples a random hyperparameter configuration as an example\n", "hyperparameter_config = autonet.get_hyperparameter_search_space().sample_configuration().get_dictionary()\n", "\n", - "# Refit with sampled hyperparameter config for 10 epochs. This time on the full dataset.\n", + "# Refit with sampled hyperparameter config for 120 s. 
This time on the full dataset.\n", "results_refit = autonet.refit(X_train=X_train,\n", " Y_train=Y_train,\n", " X_valid=None,\n", " Y_valid=None,\n", " hyperparameter_config=hyperparameter_config,\n", " autonet_config=autonet.get_current_autonet_config(),\n", - " budget=10)\n", + " budget=120)\n", "\n", "# Save json\n", "with open(\"logs/results_refit.json\", \"w\") as file:\n", @@ -201,10 +199,11 @@ "metadata": {}, "outputs": [], "source": [ + "# Print score of found configuration\n", "score = autonet.score(X_test=X_test, Y_test=Y_test)\n", "pred = autonet.predict(X=X_test)\n", "\n", - "print(\"Model prediction:\", pred)\n", + "print(\"Model prediction:\", pred[0:10])\n", "print(\"Accuracy score\", score)" ] }, @@ -212,7 +211,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Finall, you can also get the incumbent model as PyTorch Sequential model via" + "Finally, you can also get the incumbent model as PyTorch Sequential model via" ] }, { @@ -321,9 +320,9 @@ "autonet_image_classification.fit(X_train=X_train,\n", " Y_train=Y_train,\n", " images_shape=[3,32,32],\n", - " min_budget=100,\n", - " max_budget=200,\n", - " max_runtime=400,\n", + " min_budget=900,\n", + " max_budget=1200,\n", + " max_runtime=3000,\n", " save_checkpoints=True,\n", " images_root_folders=[os.path.abspath(\"../../datasets/example_images\")])" ] From 79bce9ade210b550a0e20eba60a0cbbe8285c487 Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 08:49:26 +0200 Subject: [PATCH 07/13] Notebook updates --- examples/basics/Auto-PyTorch Tutorial.ipynb | 401 +++++++++++++++++++- 1 file changed, 388 insertions(+), 13 deletions(-) diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index 955945e24..affb2f5a9 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -37,7 +37,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -55,7 +55,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -71,9 +71,196 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Configure AutoNet with the following keyword arguments.\n", + "Pass these arguments to either the constructor or fit().\n", + "\n", + "name default choices type \n", + "===============================================================================================================================================\n", + "additional_logs [] [] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "additional_metrics [] [accuracy, \n", + " auc_metric, \n", + " pac_metric, \n", + " balanced_accuracy] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "algorithm bohb [bohb, \n", + " hyperband] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "batch_loss_computation_techniques [standard, [standard, \n", + " mixup] mixup] \n", + 
"-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "best_over_epochs False [True, \n", + " False] \n", + "\tinfo: Whether to report the best performance occurred to BOHB\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "budget_type time [time, \n", + " epochs, \n", + " training_time] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "categorical_features None None \n", + "\tinfo: List of booleans that specifies for each feature whether it is categorical.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "cross_validator none dict_keys(['none', 'k_fold', 'stratified \n", + "\tinfo: Class inheriting from sklearn.model_selection.BaseCrossValidator. Ignored if validation data is given.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "cross_validator_args {} None \n", + "\tinfo: Args of cross validator. \n", + "\t\tNote that random_state and shuffle are set by pipeline config options random_seed and shuffle, if not specified here.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "cuda True [True, \n", + " False] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "dataset_name None None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "early_stopping_patience inf None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "early_stopping_reset_parameters False None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "embeddings [none, [none, \n", + " learned] learned] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "eta 3 None \n", + "\tinfo: eta parameter of Hyperband.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "final_activation softmax [softmax] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "full_eval_each_epoch False [True, \n", + " False] \n", + "\tinfo: Whether to evaluate everything every epoch. 
Results in more useful output\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "hyperparameter_search_space_updates None None [directory, \n", + " \n", + " median, median, \n", + " most_frequent] most_frequent] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "initialization_methods [default, [default, \n", + " sparse] sparse] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "initializer simple_initializer [simple_initializer] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "log_level warning [debug, \n", + " info, \n", + " warning, \n", + " error, \n", + " critical] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "loss_modules [cross_entropy, [cross_entropy, \n", + " cross_entropy_weighted] cross_entropy_weighted] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "lr_scheduler [cosine_annealing, [cosine_annealing, \n", + " cyclic, cyclic, \n", + " exponential, exponential, \n", + " step, step, \n", + " adapt, adapt, \n", + " plateau, plateau, \n", + " alternating_cosine, alternating_cosine, \n", + " none] none] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "max_budget 6000 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "max_runtime 24000 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "memory_limit_mb 1000000 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "min_budget 120 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "min_budget_for_cv 0 None \n", + "\tinfo: Specify minimum budget for cv. 
If budget is smaller use specified validation split.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "min_workers 1 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "network_interface_name eth0 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "networks [mlpnet, [mlpnet, \n", + " shapedmlpnet, shapedmlpnet, \n", + " resnet, resnet, \n", + " shapedresnet] shapedresnet] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "normalization_strategies [none, [none, \n", + " minmax, minmax, \n", + " standardize, standardize, \n", + " maxabs] maxabs] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "num_iterations inf None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "optimize_metric accuracy [accuracy, \n", + " auc_metric, \n", + " pac_metric, \n", + " balanced_accuracy] \n", + "\tinfo: This is the meta train metric BOHB will try to optimize.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "optimizer [adam, [adam, \n", + " adamw, adamw, \n", + " sgd, sgd, \n", + " rmsprop] rmsprop] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "over_sampling_methods [none, [none, \n", + " random, random, \n", + " smote] smote] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "preprocessors [none, [none, \n", + " truncated_svd, truncated_svd, \n", + " power_transformer, power_transformer, \n", + " fast_ica, fast_ica, \n", + " kitchen_sinks, kitchen_sinks, \n", + " kernel_pca, kernel_pca, \n", + " nystroem] nystroem] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "random_seed 3405296868 None \n", + "\tinfo: Make sure to specify the same seed for all workers.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "refit_validation_split 0.0 [0, \n", + " 1] \n", + "\tinfo: In range [0, 1). 
Part of train dataset used for validation in refit.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "result_logger_dir logs/ None directory \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "run_id 0 None \n", + "\tinfo: Unique id for each run.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "run_worker_on_master_node True None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "shuffle True [True, \n", + " False] \n", + "\tinfo: Shuffle train and validation set\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "target_size_strategies [none, [none, \n", + " upsample, upsample, \n", + " downsample, downsample, \n", + " average, average, \n", + " median] median] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "task_id -1 None \n", + "\tinfo: ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "torch_num_threads 1 None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "under_sampling_methods [none, [none, \n", + " random] random] \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "use_pynisher True None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "use_tensorboard_logger False None \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "validation_split 0.3 [0, \n", + " 1] \n", + "\tinfo: In range [0, 1). Part of train dataset used for validation. Ignored in fit if cross validator or valid data given.\n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n", + "working_dir . 
None directory \n", + "-----------------------------------------------------------------------------------------------------------------------------------------------\n" + ] + } + ], "source": [ "# Get the current configuration as dict\n", "current_configuration = autonet.get_current_autonet_config()\n", @@ -98,7 +285,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "scrolled": true }, @@ -128,11 +315,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/zimmerl/anaconda3/lib/python3.7/site-packages/sklearn/kernel_approximation.py:561: UserWarning: n_components > n_samples. This is not possible.\n", + "n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n", + " warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n", + "/home/zimmerl/anaconda3/lib/python3.7/site-packages/numpy/core/_methods.py:121: RuntimeWarning: overflow encountered in multiply\n", + " x = um.multiply(x, x, out=x)\n", + "/home/zimmerl/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/data.py:2798: RuntimeWarning: divide by zero encountered in log\n", + " loglike = -n_samples / 2 * np.log(x_trans.var())\n", + "/home/zimmerl/anaconda3/lib/python3.7/site-packages/sklearn/kernel_approximation.py:561: UserWarning: n_components > n_samples. This is not possible.\n", + "n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n", + " warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n" + ] + } + ], "source": [ "autonet = AutoNetClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")\n", "results_fit = autonet.fit(X_train=X_train,\n", @@ -156,9 +360,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[INFO] [17:05:13:autonet] Start autonet with config:\n", + "{'embeddings': ['none'], 'lr_scheduler': ['cosine_annealing', 'plateau'], 'networks': ['shapedresnet'], 'over_sampling_methods': ['smote'], 'preprocessors': ['none', 'truncated_svd', 'power_transformer'], 'target_size_strategies': ['none', 'upsample', 'median'], 'result_logger_dir': 'logs/', 'budget_type': 'time', 'log_level': 'info', 'use_tensorboard_logger': True, 'hyperparameter_search_space_updates': None, 'categorical_features': None, 'dataset_name': None, 'run_id': '0', 'task_id': -1, 'algorithm': 'bohb', 'eta': 3, 'min_workers': 1, 'working_dir': '.', 'network_interface_name': 'eth0', 'memory_limit_mb': 1000000, 'run_worker_on_master_node': True, 'use_pynisher': True, 'validation_split': 0.3, 'refit_validation_split': 0.0, 'cross_validator': 'none', 'cross_validator_args': {}, 'min_budget_for_cv': 0, 'shuffle': True, 'imputation_strategies': ['mean', 'median', 'most_frequent'], 'normalization_strategies': ['none', 'minmax', 'standardize', 'maxabs'], 'under_sampling_methods': ['none', 'random'], 'final_activation': 'softmax', 'initialization_methods': ['default', 'sparse'], 'initializer': 'simple_initializer', 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'], 'additional_logs': [], 'optimize_metric': 'accuracy', 'additional_metrics': [], 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'], 'batch_loss_computation_techniques': ['standard', 'mixup'], 'cuda': True, 
'torch_num_threads': 1, 'full_eval_each_epoch': False, 'best_over_epochs': False, 'early_stopping_patience': inf, 'early_stopping_reset_parameters': False, 'random_seed': 3405296868, 'min_budget': 120, 'max_budget': 6000, 'max_runtime': 24000, 'num_iterations': inf, 'cv_splits': 1, 'increase_number_of_trained_datasets': False}\n", + "[INFO] [17:05:13:autonet] Start Refitting\n", + "[INFO] [17:05:13:autonet] [AutoNet] No validation set given and either no cross validator given or budget too low for CV. Continue by splitting 0 of training data.\n", + "[INFO] [17:05:13:autonet] [AutoNet] CV split 0 of 1\n", + "[INFO] [17:05:13:autonet] Reduced initial budget 119.93970322608948 to cv budget 119.93916440010071 compensate for 0.0005388259887695312\n", + "[INFO] [17:07:03:autonet] Finished train with budget 119.93916440010071: Preprocessing took 0s, Training took 109s, Wrap up took 0s. Total time consumption in s: 109\n", + "[INFO] [17:07:03:autonet] [AutoNet] Done with current split!\n", + "[INFO] [17:07:03:autonet] Aggregate the results across the splits\n", + "[INFO] [17:07:03:autonet] Process 1 additional result(s)\n", + "[INFO] [17:07:03:autonet] Done Refitting\n" + ] + } + ], "source": [ "# Create an autonet, use tensorboard during fitting\n", "autonet_config = {\n", @@ -195,9 +417,27 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model prediction: [[1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]\n", + " [1.]]\n", + "Accuracy score 30.0\n" + ] + } + ], "source": [ "# Print score of found configuration\n", "score = autonet.score(X_test=X_test, Y_test=Y_test)\n", @@ -220,7 +460,142 @@ "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sequential(\n", + " (0): Linear(in_features=19, out_features=27, bias=True)\n", + " (1): Sequential(\n", + " (0): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (shake_shake_layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (2): Sequential(\n", + " (0): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (shake_shake_layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, 
out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (3): Sequential(\n", + " (0): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (shake_shake_layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (4): Sequential(\n", + " (0): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (shake_shake_layers): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=27, out_features=27, bias=True)\n", + " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (5): Sequential(\n", + " (0): ResBlock(\n", + " (shortcut): Linear(in_features=27, out_features=19, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " )\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=27, out_features=19, bias=True)\n", + " (1): BatchNorm1d(19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Sigmoid()\n", + " (3): Linear(in_features=19, out_features=19, bias=True)\n", + " )\n", + " (shake_shake_layers): Sequential(\n", + " (0): Linear(in_features=27, out_features=19, bias=True)\n", + " (1): BatchNorm1d(19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Sigmoid()\n", + " (3): Linear(in_features=19, out_features=19, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (6): Sequential(\n", + " (0): ResBlock(\n", + " (shortcut): Linear(in_features=19, out_features=11, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " )\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=19, out_features=11, bias=True)\n", + " (1): BatchNorm1d(11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Sigmoid()\n", + " (3): Linear(in_features=11, out_features=11, bias=True)\n", + " )\n", + " (shake_shake_layers): Sequential(\n", + " (0): Linear(in_features=19, out_features=11, bias=True)\n", + " 
(1): BatchNorm1d(11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): Sigmoid()\n", + " (3): Linear(in_features=11, out_features=11, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (7): BatchNorm1d(11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (8): Sigmoid()\n", + " (9): Linear(in_features=11, out_features=2, bias=True)\n", + ")\n" + ] + } + ], "source": [ "pytorch_model = autonet.get_pytorch_model()\n", "print(pytorch_model)" From eed5954a83e2516bd65e3ca28ce392e09560d79a Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 12:46:54 +0200 Subject: [PATCH 08/13] Added infos for config options --- .gitignore | 3 + .../components/networks/image/mobilenet.py | 2 - .../components/training/image/trainer.py | 3 +- autoPyTorch/core/api.py | 17 +- .../nodes/image/create_dataset_info.py | 4 +- .../nodes/image/create_image_dataloader.py | 2 +- .../nodes/image/image_augmentation.py | 10 +- .../pipeline/nodes/image/multiple_datasets.py | 4 +- .../optimization_algorithm_no_timelimit.py | 8 +- .../pipeline/nodes/image/simple_train_node.py | 2 +- .../pipeline/nodes/image/single_dataset.py | 4 +- .../pipeline/nodes/optimization_algorithm.py | 10 +- examples/basics/Auto-PyTorch Tutorial.ipynb | 901 +++++++++++------- 13 files changed, 582 insertions(+), 388 deletions(-) diff --git a/.gitignore b/.gitignore index f0086e3ce..20255fcf3 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,6 @@ test_predictions_for_ensemble.npy # testing tests.ipynb + +# venv +env/ diff --git a/autoPyTorch/components/networks/image/mobilenet.py b/autoPyTorch/components/networks/image/mobilenet.py index 7d5a888de..a2190b1a3 100644 --- a/autoPyTorch/components/networks/image/mobilenet.py +++ b/autoPyTorch/components/networks/image/mobilenet.py @@ -194,8 +194,6 @@ def _cfg(url='', **kwargs): self.model.default_cfg = _cfg(url='', input_size=in_features, pool_size=(10, 10), crop_pct=0.904, num_classes=out_features) - self.layers = nn.Sequential(self.model.forward_features) - def forward(self, x): # make sure channels first x = self.model(x) diff --git a/autoPyTorch/components/training/image/trainer.py b/autoPyTorch/components/training/image/trainer.py index c8653752d..49a2a466f 100644 --- a/autoPyTorch/components/training/image/trainer.py +++ b/autoPyTorch/components/training/image/trainer.py @@ -69,7 +69,6 @@ def train(self, epoch, train_loader, metrics): metric_results = [0] * len(metrics) start_time = time.time() for step, (data, targets) in enumerate(train_loader): - # import matplotlib.pyplot as plt # img = plt.imshow(data.numpy()[0,1,:]) # plt.show() @@ -125,6 +124,8 @@ def train(self, epoch, train_loader, metrics): budget_exceeded = True break + if N==0: # Fixes a bug during initialization + N=1 if self.images_plot_count > 0: import tensorboard_logger as tl diff --git a/autoPyTorch/core/api.py b/autoPyTorch/core/api.py index 0b1e55bdc..fe680a37d 100644 --- a/autoPyTorch/core/api.py +++ b/autoPyTorch/core/api.py @@ -269,17 +269,24 @@ def score(self, X_test, Y_test, return_loss_value=False): return metric(torch.from_numpy(Y_pred.astype(np.float32)), torch.from_numpy(Y_test.astype(np.float32))) def get_pytorch_model(self): - """Returns a pytorch sequential model of the current incumbent configuration + """Returns a pytorch sequential model of the current incumbent configuration. Not possible for all models. 
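Networks that wrap an external implementation (e.g. the mobilenet variant) do not expose their layers as a plain torch.nn.Sequential; for those, the full Auto-PyTorch network module is returned instead.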
Arguments: Returns: model -- PyTorch sequential model of the current incumbent configuration """ - if NetworkSelector.get_name() in self.pipeline: - return self.pipeline[NetworkSelector.get_name()].fit_output["network"].layers - else: - return self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"].layers + try: + if NetworkSelector.get_name() in self.pipeline: + return self.pipeline[NetworkSelector.get_name()].fit_output["network"].layers + else: + return self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"].layers + except: + print("Can not get PyTorch Sequential model for incumbent config. Returning Auto-PyTorch model") + if NetworkSelector.get_name() in self.pipeline: + return self.pipeline[NetworkSelector.get_name()].fit_output["network"] + else: + return self.pipeline[NetworkSelectorDatasetInfo.get_name()].fit_output["network"] def initialize_from_checkpoint(self, hyperparameter_config, checkpoint, in_features, out_features, final_activation=None): """ diff --git a/autoPyTorch/pipeline/nodes/image/create_dataset_info.py b/autoPyTorch/pipeline/nodes/image/create_dataset_info.py index 4feee5fb4..8c88e8875 100644 --- a/autoPyTorch/pipeline/nodes/image/create_dataset_info.py +++ b/autoPyTorch/pipeline/nodes/image/create_dataset_info.py @@ -77,8 +77,8 @@ def predict(self, pipeline_config, X): def get_pipeline_config_options(self): options = [ ConfigOption(name="file_extensions", default=['.png', '.jpg', '.JPEG', '.pgm'], type=str, list=True), - ConfigOption(name="images_shape", default=[3, 32, 32], type=int, list=True), - ConfigOption(name="images_root_folders", default=[ConfigFileParser.get_autonet_home()], type='directory', list=True), + ConfigOption(name="images_shape", default=[3, 32, 32], type=int, list=True, info="Image size input to the networks, images will be rescaled to this."), + ConfigOption(name="images_root_folders", default=[ConfigFileParser.get_autonet_home()], type='directory', list=True, info="Directory relative to which image paths are given."), ConfigOption(name="max_class_size", default=None, type=int), ] return options diff --git a/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py b/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py index 987be3418..91f58d278 100644 --- a/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py +++ b/autoPyTorch/pipeline/nodes/image/create_image_dataloader.py @@ -63,7 +63,7 @@ def fit(self, pipeline_config, hyperparameter_config, X, Y, train_indices, valid def get_pipeline_config_options(self): options = [ - ConfigOption("default_dataset_download_dir", default=ConfigFileParser.get_autonet_home(), type='directory'), + ConfigOption("default_dataset_download_dir", default=ConfigFileParser.get_autonet_home(), type='directory', info="Directory default datasets will be downloaded to."), ConfigOption("dataloader_worker", default=1, type=int), ConfigOption("dataloader_cache_size_mb", default=0, type=int) ] diff --git a/autoPyTorch/pipeline/nodes/image/image_augmentation.py b/autoPyTorch/pipeline/nodes/image/image_augmentation.py index 26eafd369..9690ce206 100644 --- a/autoPyTorch/pipeline/nodes/image/image_augmentation.py +++ b/autoPyTorch/pipeline/nodes/image/image_augmentation.py @@ -210,10 +210,12 @@ def compute_mean_std(self, pipeline_config, hyperparameter_config, X, Y, train_i std = std + data.std(2).sum(0) nb_samples += batch_samples - mean /= nb_samples - std /= nb_samples - - mean, std = mean.numpy().tolist(), std.numpy().tolist() + if nb_samples > 0.: + mean /= 
nb_samples + std /= nb_samples + mean, std = mean.numpy().tolist(), std.numpy().tolist() + else: + mean, std = [mean], [std] log.debug('MEAN: ' + str(mean) + ' -- STD: ' + str(std)) diff --git a/autoPyTorch/pipeline/nodes/image/multiple_datasets.py b/autoPyTorch/pipeline/nodes/image/multiple_datasets.py index 80ed233b1..1b1adaf74 100644 --- a/autoPyTorch/pipeline/nodes/image/multiple_datasets.py +++ b/autoPyTorch/pipeline/nodes/image/multiple_datasets.py @@ -107,9 +107,9 @@ def predict(self, pipeline_config, X): def get_pipeline_config_options(self): options = [ - ConfigOption('dataset_order', default=None, type=int, list=True), + ConfigOption('dataset_order', default=None, type=int, list=True, info="Order in which datasets are considered."), #autonet.refit sets this to false to avoid refit budget issues - ConfigOption('increase_number_of_trained_datasets', default=True, type=to_bool) + ConfigOption('increase_number_of_trained_datasets', default=True, type=to_bool, info="Wether to increase the number of considered datasets with each successive halfing iteration.") ] return options diff --git a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py index a563b3f44..7b7af6892 100644 --- a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py +++ b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py @@ -151,16 +151,16 @@ def get_pipeline_config_options(self): ConfigOption("task_id", default=-1, type=int, info="ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. "), ConfigOption("algorithm", default="bohb", type=str, choices=list(self.algorithms.keys())), ConfigOption("budget_type", default="time", type=str, choices=['time', 'epochs']), - ConfigOption("min_budget", default=lambda c: 120 if c['budget_type'] == 'time' else 5, type=float, depends=True), - ConfigOption("max_budget", default=lambda c: 6000 if c['budget_type'] == 'time' else 150, type=float, depends=True), + ConfigOption("min_budget", default=lambda c: 120 if c['budget_type'] == 'time' else 5, type=float, depends=True, info="Min budget for fitting configurations."), + ConfigOption("max_budget", default=lambda c: 6000 if c['budget_type'] == 'time' else 150, type=float, depends=True, info="Max budget for fitting configurations."), ConfigOption("max_runtime", default=lambda c: ((-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) * c["max_budget"]) if c["budget_type"] == "time" else float("inf"), - type=float, depends=True), + type=float, depends=True, info="Total time for the run."), ConfigOption("num_iterations", default=lambda c: (-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) if c["budget_type"] == "epochs" else float("inf"), - type=float, depends=True), + type=float, depends=True, info="Number of successive halving iterations"), ConfigOption("eta", default=3, type=float, info='eta parameter of Hyperband.'), ConfigOption("min_workers", default=1, type=int), ConfigOption("working_dir", default=".", type="directory"), diff --git a/autoPyTorch/pipeline/nodes/image/simple_train_node.py b/autoPyTorch/pipeline/nodes/image/simple_train_node.py index 6ccddbdeb..cb88d26ea 100644 --- a/autoPyTorch/pipeline/nodes/image/simple_train_node.py +++ b/autoPyTorch/pipeline/nodes/image/simple_train_node.py @@ -295,7 +295,7 @@ def get_pipeline_config_options(self): type=str, list=True, choices=list(self.batch_loss_computation_techniques.keys())), 
ConfigOption("minimize", default=self.default_minimize_value, type=to_bool, choices=[True, False]), ConfigOption("cuda", default=True, type=to_bool, choices=[True, False]), - ConfigOption("save_checkpoints", default=False, type=to_bool, choices=[True, False]), + ConfigOption("save_checkpoints", default=False, type=to_bool, choices=[True, False], info="Wether to save state dicts as checkpoints."), ConfigOption("tensorboard_min_log_interval", default=30, type=int), ConfigOption("tensorboard_images_count", default=0, type=int), ConfigOption("evaluate_on_train_data", default=True, type=to_bool), diff --git a/autoPyTorch/pipeline/nodes/image/single_dataset.py b/autoPyTorch/pipeline/nodes/image/single_dataset.py index 6d8cd8417..0509518e5 100644 --- a/autoPyTorch/pipeline/nodes/image/single_dataset.py +++ b/autoPyTorch/pipeline/nodes/image/single_dataset.py @@ -26,10 +26,10 @@ def predict(self, pipeline_config, X): def get_pipeline_config_options(self): options = [ - ConfigOption('dataset_order', default=None, type=int, list=True), + ConfigOption('dataset_order', default=None, type=int, list=True, info="Only used for multiple datasets."), #autonet.refit sets this to false to avoid refit budget issues - ConfigOption('increase_number_of_trained_datasets', default=False, type=to_bool) + ConfigOption('increase_number_of_trained_datasets', default=False, type=to_bool, info="Only used for multiple datasets.") ] return options diff --git a/autoPyTorch/pipeline/nodes/optimization_algorithm.py b/autoPyTorch/pipeline/nodes/optimization_algorithm.py index 1358e9ae7..095f8dc6e 100644 --- a/autoPyTorch/pipeline/nodes/optimization_algorithm.py +++ b/autoPyTorch/pipeline/nodes/optimization_algorithm.py @@ -158,18 +158,18 @@ def get_pipeline_config_options(self): options = [ ConfigOption("run_id", default="0", type=str, info="Unique id for each run."), ConfigOption("task_id", default=-1, type=int, info="ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. 
"), - ConfigOption("algorithm", default="bohb", type=str, choices=list(self.algorithms.keys())), + ConfigOption("algorithm", default="bohb", type=str, choices=list(self.algorithms.keys()), info="Algorithm to use for config sampling."), ConfigOption("budget_type", default="time", type=str, choices=list(self.budget_types.keys())), - ConfigOption("min_budget", default=lambda c: self.budget_types[c["budget_type"]].default_min_budget, type=float, depends=True), - ConfigOption("max_budget", default=lambda c: self.budget_types[c["budget_type"]].default_max_budget, type=float, depends=True), + ConfigOption("min_budget", default=lambda c: self.budget_types[c["budget_type"]].default_min_budget, type=float, depends=True, info="Min budget for fitting configurations."), + ConfigOption("max_budget", default=lambda c: self.budget_types[c["budget_type"]].default_max_budget, type=float, depends=True, info="Max budget for fitting configurations."), ConfigOption("max_runtime", default=lambda c: ((-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) * c["max_budget"]) if c["budget_type"] == "time" else float("inf"), - type=float, depends=True), + type=float, depends=True, info="Total time for the run."), ConfigOption("num_iterations", default=lambda c: (-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) if c["budget_type"] == "epochs" else float("inf"), - type=float, depends=True), + type=float, depends=True, info="Number of successive halving iterations."), ConfigOption("eta", default=3, type=float, info='eta parameter of Hyperband.'), ConfigOption("min_workers", default=1, type=int), ConfigOption("working_dir", default=".", type="directory"), diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index affb2f5a9..fdf760344 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -10,7 +10,7 @@ "So far, Auto-PyTorch covers classification and regression on featurized data as well as classification on image data.\n", "For installing Auto-PyTorch, please refer to the github page.\n", "\n", - "**Note**: In this notebook data will be downloaded from the openml project for featurized tasks and CIFAR10 will be downloaded for image classification. Hence, an internet connection is required." + "**Disclaimer**: In this notebook, data will be downloaded from the openml project for featurized tasks and CIFAR10 will be downloaded for image classification. Hence, an internet connection is required." 
] }, { @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -37,11 +37,14 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ + "# Other imports for later usage\n", + "import pandas as pd\n", "import numpy as np\n", + "import os as os\n", "import openml\n", "import json" ] @@ -55,11 +58,11 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "autonet = AutoNetClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")" + "autonet = AutoNetClassification(config_preset=\"tiny_cs\", result_logger_dir=\"logs/\")" ] }, { @@ -71,196 +74,11 @@ }, { "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Configure AutoNet with the following keyword arguments.\n", - "Pass these arguments to either the constructor or fit().\n", - "\n", - "name default choices type \n", - "===============================================================================================================================================\n", - "additional_logs [] [] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "additional_metrics [] [accuracy, \n", - " auc_metric, \n", - " pac_metric, \n", - " balanced_accuracy] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "algorithm bohb [bohb, \n", - " hyperband] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "batch_loss_computation_techniques [standard, [standard, \n", - " mixup] mixup] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "best_over_epochs False [True, \n", - " False] \n", - "\tinfo: Whether to report the best performance occurred to BOHB\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "budget_type time [time, \n", - " epochs, \n", - " training_time] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "categorical_features None None \n", - "\tinfo: List of booleans that specifies for each feature whether it is categorical.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cross_validator none dict_keys(['none', 'k_fold', 'stratified \n", - "\tinfo: Class inheriting from sklearn.model_selection.BaseCrossValidator. Ignored if validation data is given.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cross_validator_args {} None \n", - "\tinfo: Args of cross validator. 
\n", - "\t\tNote that random_state and shuffle are set by pipeline config options random_seed and shuffle, if not specified here.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "cuda True [True, \n", - " False] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "dataset_name None None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "early_stopping_patience inf None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "early_stopping_reset_parameters False None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "embeddings [none, [none, \n", - " learned] learned] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "eta 3 None \n", - "\tinfo: eta parameter of Hyperband.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "final_activation softmax [softmax] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "full_eval_each_epoch False [True, \n", - " False] \n", - "\tinfo: Whether to evaluate everything every epoch. 
Results in more useful output\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "hyperparameter_search_space_updates None None [directory, \n", - " \n", - " median, median, \n", - " most_frequent] most_frequent] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "initialization_methods [default, [default, \n", - " sparse] sparse] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "initializer simple_initializer [simple_initializer] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "log_level warning [debug, \n", - " info, \n", - " warning, \n", - " error, \n", - " critical] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "loss_modules [cross_entropy, [cross_entropy, \n", - " cross_entropy_weighted] cross_entropy_weighted] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "lr_scheduler [cosine_annealing, [cosine_annealing, \n", - " cyclic, cyclic, \n", - " exponential, exponential, \n", - " step, step, \n", - " adapt, adapt, \n", - " plateau, plateau, \n", - " alternating_cosine, alternating_cosine, \n", - " none] none] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "max_budget 6000 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "max_runtime 24000 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "memory_limit_mb 1000000 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "min_budget 120 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "min_budget_for_cv 0 None \n", - "\tinfo: Specify minimum budget for cv. 
If budget is smaller use specified validation split.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "min_workers 1 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "network_interface_name eth0 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "networks [mlpnet, [mlpnet, \n", - " shapedmlpnet, shapedmlpnet, \n", - " resnet, resnet, \n", - " shapedresnet] shapedresnet] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "normalization_strategies [none, [none, \n", - " minmax, minmax, \n", - " standardize, standardize, \n", - " maxabs] maxabs] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "num_iterations inf None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "optimize_metric accuracy [accuracy, \n", - " auc_metric, \n", - " pac_metric, \n", - " balanced_accuracy] \n", - "\tinfo: This is the meta train metric BOHB will try to optimize.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "optimizer [adam, [adam, \n", - " adamw, adamw, \n", - " sgd, sgd, \n", - " rmsprop] rmsprop] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "over_sampling_methods [none, [none, \n", - " random, random, \n", - " smote] smote] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "preprocessors [none, [none, \n", - " truncated_svd, truncated_svd, \n", - " power_transformer, power_transformer, \n", - " fast_ica, fast_ica, \n", - " kitchen_sinks, kitchen_sinks, \n", - " kernel_pca, kernel_pca, \n", - " nystroem] nystroem] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "random_seed 3405296868 None \n", - "\tinfo: Make sure to specify the same seed for all workers.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "refit_validation_split 0.0 [0, \n", - " 1] \n", - "\tinfo: In range [0, 1). 
Part of train dataset used for validation in refit.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "result_logger_dir logs/ None directory \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "run_id 0 None \n", - "\tinfo: Unique id for each run.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "run_worker_on_master_node True None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "shuffle True [True, \n", - " False] \n", - "\tinfo: Shuffle train and validation set\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "target_size_strategies [none, [none, \n", - " upsample, upsample, \n", - " downsample, downsample, \n", - " average, average, \n", - " median] median] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "task_id -1 None \n", - "\tinfo: ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally. \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "torch_num_threads 1 None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "under_sampling_methods [none, [none, \n", - " random] random] \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "use_pynisher True None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "use_tensorboard_logger False None \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "validation_split 0.3 [0, \n", - " 1] \n", - "\tinfo: In range [0, 1). Part of train dataset used for validation. Ignored in fit if cross validator or valid data given.\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n", - "working_dir . 
None directory \n", - "-----------------------------------------------------------------------------------------------------------------------------------------------\n" - ] - } - ], + "execution_count": 7, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "# Get the current configuration as dict\n", "current_configuration = autonet.get_current_autonet_config()\n", @@ -268,8 +86,8 @@ "# Get the ConfigSpace object with all hyperparameters, conditions, default values and default ranges\n", "hyperparameter_search_space = autonet.get_hyperparameter_search_space()\n", "\n", - "# Print all possible configuration options\n", - "autonet.print_help()" + "# Print all possible configuration options \n", + "#autonet.print_help()" ] }, { @@ -278,24 +96,19 @@ "scrolled": true }, "source": [ - "The most important methods for using Auto-PyTorch are **fit**, **refit**, **score** and **predict**.\n", + "The most important methods for using Auto-PyTorch are ***fit***, ***refit***, ***score*** and ***predict***.\n", "\n", - "**fit** is used to search for a good configuration by fitting different parameter configurations. The incumbent configuration is then returned and stored in the class:" + "First, we get some data:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "metadata": { "scrolled": true }, "outputs": [], "source": [ - "import numpy as np\n", - "import openml\n", - "import json\n", - "from sklearn.model_selection import train_test_split\n", - "\n", "# Get data from the openml task \"Supervised Classification on credit-g (https://www.openml.org/t/31)\"\n", "task = openml.tasks.get_task(task_id=31)\n", "X, y = task.get_X_and_y()\n", @@ -308,45 +121,43 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Search for a configuration for 300 seconds and with 60-120 s time for fitting.\n", - "Use the validation_split parameter to specify a split size. You can also pass your own validation set\n", - "via X_val and Y_val. Use log_level=\"info\" or log_level=\"debug\" for more detailed output." + "***fit*** is used to search for a good configuration by fitting configurations chosen by the algorithm (by default BOHB). The incumbent configuration is then returned and stored in the class.\n", + "\n", + "We recommend to have a look at the possible configuration options first. Some of the most important options allow you to set the budget type (epochs or time), run id and task id for cluster usage, tensorboard logging, seed and more.\n", + "\n", + "Here we search for a configuration for 300 seconds with 60-100 s time for fitting each individual configuration.\n", + "Use the *validation_split* parameter to specify a split size. You can also pass your own validation set\n", + "via *X_val* and *Y_val*. Use *log_level=\"info\"* or *log_level=\"debug\"* for more detailed output." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [ { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "/home/zimmerl/anaconda3/lib/python3.7/site-packages/sklearn/kernel_approximation.py:561: UserWarning: n_components > n_samples. This is not possible.\n", - "n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n", - " warnings.warn(\"n_components > n_samples. 
This is not possible.\\n\"\n", - "/home/zimmerl/anaconda3/lib/python3.7/site-packages/numpy/core/_methods.py:121: RuntimeWarning: overflow encountered in multiply\n", - " x = um.multiply(x, x, out=x)\n", - "/home/zimmerl/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/data.py:2798: RuntimeWarning: divide by zero encountered in log\n", - " loglike = -n_samples / 2 * np.log(x_trans.var())\n", - "/home/zimmerl/anaconda3/lib/python3.7/site-packages/sklearn/kernel_approximation.py:561: UserWarning: n_components > n_samples. This is not possible.\n", - "n_components was set to n_samples, which results in inefficient evaluation of the full kernel.\n", - " warnings.warn(\"n_components > n_samples. This is not possible.\\n\"\n" + "The Box-Cox transformation can only be applied to strictly positive data\n", + "Using yeo-johnson instead\n" ] } ], "source": [ - "autonet = AutoNetClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")\n", + "autonet = AutoNetClassification(config_preset=\"tiny_cs\", result_logger_dir=\"logs/\")\n", + "# Fit (note that the settings are for demonstration, you might need larger budgets)\n", "results_fit = autonet.fit(X_train=X_train,\n", " Y_train=Y_train,\n", " validation_split=0.3,\n", " max_runtime=300,\n", " min_budget=60,\n", - " max_budget=120)\n", + " max_budget=100,\n", + " refit=True)\n", "\n", - "# Save json\n", + "# Save fit results as json\n", "with open(\"logs/results_fit.json\", \"w\") as file:\n", " json.dump(results_fit, file)" ] @@ -355,43 +166,48 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**refit** allows you to fit a configuration of your choice for a defined time. You can specify a hyperparameter configuration to fit (if you do not specify a configuration the incumbent configuration from the last fit call will be used):" + "***refit*** allows you to fit a configuration of your choice for a defined time. By default, the incumbent configuration is refitted during a *fit* call using the *max_budget*. 
However, *refit* might be useful if you want to fit on the full dataset or even another dataset or if you just want to fit a model without searching.\n", + "\n", + "You can specify a hyperparameter configuration to fit (if you do not specify a configuration the incumbent configuration from the last fit call will be used):" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "[INFO] [17:05:13:autonet] Start autonet with config:\n", - "{'embeddings': ['none'], 'lr_scheduler': ['cosine_annealing', 'plateau'], 'networks': ['shapedresnet'], 'over_sampling_methods': ['smote'], 'preprocessors': ['none', 'truncated_svd', 'power_transformer'], 'target_size_strategies': ['none', 'upsample', 'median'], 'result_logger_dir': 'logs/', 'budget_type': 'time', 'log_level': 'info', 'use_tensorboard_logger': True, 'hyperparameter_search_space_updates': None, 'categorical_features': None, 'dataset_name': None, 'run_id': '0', 'task_id': -1, 'algorithm': 'bohb', 'eta': 3, 'min_workers': 1, 'working_dir': '.', 'network_interface_name': 'eth0', 'memory_limit_mb': 1000000, 'run_worker_on_master_node': True, 'use_pynisher': True, 'validation_split': 0.3, 'refit_validation_split': 0.0, 'cross_validator': 'none', 'cross_validator_args': {}, 'min_budget_for_cv': 0, 'shuffle': True, 'imputation_strategies': ['mean', 'median', 'most_frequent'], 'normalization_strategies': ['none', 'minmax', 'standardize', 'maxabs'], 'under_sampling_methods': ['none', 'random'], 'final_activation': 'softmax', 'initialization_methods': ['default', 'sparse'], 'initializer': 'simple_initializer', 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'], 'additional_logs': [], 'optimize_metric': 'accuracy', 'additional_metrics': [], 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'], 'batch_loss_computation_techniques': ['standard', 'mixup'], 'cuda': True, 'torch_num_threads': 1, 'full_eval_each_epoch': False, 'best_over_epochs': False, 'early_stopping_patience': inf, 'early_stopping_reset_parameters': False, 'random_seed': 3405296868, 'min_budget': 120, 'max_budget': 6000, 'max_runtime': 24000, 'num_iterations': inf, 'cv_splits': 1, 'increase_number_of_trained_datasets': False}\n", - "[INFO] [17:05:13:autonet] Start Refitting\n", - "[INFO] [17:05:13:autonet] [AutoNet] No validation set given and either no cross validator given or budget too low for CV. Continue by splitting 0 of training data.\n", - "[INFO] [17:05:13:autonet] [AutoNet] CV split 0 of 1\n", - "[INFO] [17:05:13:autonet] Reduced initial budget 119.93970322608948 to cv budget 119.93916440010071 compensate for 0.0005388259887695312\n", - "[INFO] [17:07:03:autonet] Finished train with budget 119.93916440010071: Preprocessing took 0s, Training took 109s, Wrap up took 0s. 
Total time consumption in s: 109\n", - "[INFO] [17:07:03:autonet] [AutoNet] Done with current split!\n", - "[INFO] [17:07:03:autonet] Aggregate the results across the splits\n", - "[INFO] [17:07:03:autonet] Process 1 additional result(s)\n", - "[INFO] [17:07:03:autonet] Done Refitting\n" + "[INFO] [12:46:30:autonet] Start autonet with config:\n", + "{'embeddings': ['none'], 'lr_scheduler': ['cosine_annealing', 'plateau'], 'networks': ['shapedresnet'], 'over_sampling_methods': ['smote'], 'preprocessors': ['none', 'truncated_svd', 'power_transformer'], 'target_size_strategies': ['none', 'upsample', 'median'], 'result_logger_dir': 'logs/', 'budget_type': 'epochs', 'log_level': 'info', 'use_tensorboard_logger': True, 'validation_split': 0.0, 'hyperparameter_search_space_updates': None, 'categorical_features': None, 'dataset_name': None, 'run_id': '0', 'task_id': -1, 'algorithm': 'bohb', 'eta': 3, 'min_workers': 1, 'working_dir': '.', 'network_interface_name': 'eth0', 'memory_limit_mb': 1000000, 'run_worker_on_master_node': True, 'use_pynisher': True, 'refit_validation_split': 0.0, 'cross_validator': 'none', 'cross_validator_args': {}, 'min_budget_for_cv': 0, 'shuffle': True, 'imputation_strategies': ['mean', 'median', 'most_frequent'], 'normalization_strategies': ['none', 'minmax', 'standardize', 'maxabs'], 'under_sampling_methods': ['none', 'random'], 'final_activation': 'softmax', 'initialization_methods': ['default', 'sparse'], 'initializer': 'simple_initializer', 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'], 'additional_logs': [], 'optimize_metric': 'accuracy', 'additional_metrics': [], 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'], 'batch_loss_computation_techniques': ['standard', 'mixup'], 'cuda': True, 'torch_num_threads': 1, 'full_eval_each_epoch': False, 'best_over_epochs': False, 'early_stopping_patience': inf, 'early_stopping_reset_parameters': False, 'random_seed': 930567008, 'min_budget': 5, 'max_budget': 150, 'max_runtime': inf, 'num_iterations': 4, 'cv_splits': 1, 'increase_number_of_trained_datasets': False}\n", + "[INFO] [12:46:30:autonet] Start Refitting\n", + "[INFO] [12:46:30:autonet] [AutoNet] No validation set given and either no cross validator given or budget too low for CV. Continue by splitting 0 of training data.\n", + "[INFO] [12:46:30:autonet] [AutoNet] CV split 0 of 1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The Box-Cox transformation can only be applied to strictly positive data\n", + "Using yeo-johnson instead\n" ] } ], "source": [ - "# Create an autonet, use tensorboard during fitting\n", + "# Create an autonet\n", "autonet_config = {\n", " \"result_logger_dir\" : \"logs/\",\n", - " \"budget_type\" : \"time\",\n", + " \"budget_type\" : \"epochs\",\n", " \"log_level\" : \"info\", \n", - " \"use_tensorboard_logger\" : True\n", + " \"use_tensorboard_logger\" : True,\n", + " \"validation_split\" : 0.0\n", " }\n", "autonet = AutoNetClassification(**autonet_config)\n", "\n", - "# This samples a random hyperparameter configuration as an example\n", + "# Sample a random hyperparameter configuration as an example\n", "hyperparameter_config = autonet.get_hyperparameter_search_space().sample_configuration().get_dictionary()\n", "\n", "# Refit with sampled hyperparameter config for 120 s. 
This time on the full dataset.\n", @@ -401,7 +217,7 @@ " Y_valid=None,\n", " hyperparameter_config=hyperparameter_config,\n", " autonet_config=autonet.get_current_autonet_config(),\n", - " budget=120)\n", + " budget=50)\n", "\n", "# Save json\n", "with open(\"logs/results_refit.json\", \"w\") as file:\n", @@ -412,7 +228,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**pred** returns the predictions of the incumbent model. **score** can be used to evaluate the model on a test set. " + "***pred*** returns the predictions of the incumbent model. ***score*** can be used to evaluate the model on a test set. " ] }, { @@ -424,22 +240,22 @@ "name": "stdout", "output_type": "stream", "text": [ - "Model prediction: [[1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]\n", - " [1.]]\n", - "Accuracy score 30.0\n" + "Model prediction: [[0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [0.]]\n", + "Accuracy score 67.0\n" ] } ], "source": [ - "# Print score of found configuration\n", + "# See how the random configuration performs (often it just predicts 0)\n", "score = autonet.score(X_test=X_test, Y_test=Y_test)\n", "pred = autonet.predict(X=X_test)\n", "\n", @@ -456,7 +272,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": { "scrolled": true }, @@ -466,132 +282,278 @@ "output_type": "stream", "text": [ "Sequential(\n", - " (0): Linear(in_features=19, out_features=27, bias=True)\n", + " (0): Linear(in_features=20, out_features=54, bias=True)\n", " (1): Sequential(\n", " (0): ResBlock(\n", + " (shortcut): Linear(in_features=54, out_features=44, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(54, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " )\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " (0): Linear(in_features=54, out_features=44, bias=True)\n", + " (1): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Linear(in_features=44, out_features=44, bias=True)\n", " )\n", - " (shake_shake_layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=44, out_features=44, bias=True)\n", + " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=44, out_features=44, bias=True)\n", + " )\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " 
(2): Linear(in_features=44, out_features=44, bias=True)\n", + " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=44, out_features=44, bias=True)\n", " )\n", " )\n", " )\n", " (2): Sequential(\n", " (0): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=44, out_features=44, bias=True)\n", + " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=44, out_features=44, bias=True)\n", " )\n", - " (shake_shake_layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=44, out_features=44, bias=True)\n", + " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=44, out_features=44, bias=True)\n", + " )\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=44, out_features=44, bias=True)\n", + " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=44, out_features=44, bias=True)\n", " )\n", " )\n", " )\n", " (3): Sequential(\n", " (0): ResBlock(\n", + " (shortcut): Linear(in_features=44, out_features=34, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " )\n", + " (layers): Sequential(\n", + " (0): Linear(in_features=44, out_features=34, bias=True)\n", + " (1): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Linear(in_features=34, out_features=34, bias=True)\n", + " )\n", + " )\n", + " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=34, out_features=34, bias=True)\n", + " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=34, out_features=34, bias=True)\n", " )\n", - " (shake_shake_layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=34, out_features=34, bias=True)\n", + " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=34, out_features=34, bias=True)\n", " )\n", " )\n", " )\n", " (4): Sequential(\n", " (0): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=34, out_features=34, bias=True)\n", + " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=34, out_features=34, bias=True)\n", + " )\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=34, out_features=34, bias=True)\n", + " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=34, out_features=34, bias=True)\n", " )\n", - " (shake_shake_layers): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=27, out_features=27, bias=True)\n", - " (3): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Linear(in_features=27, out_features=27, bias=True)\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=34, out_features=34, bias=True)\n", + " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=34, out_features=34, bias=True)\n", " )\n", " )\n", " )\n", " (5): Sequential(\n", " (0): ResBlock(\n", - " (shortcut): Linear(in_features=27, out_features=19, bias=True)\n", + " (shortcut): Linear(in_features=34, out_features=24, bias=True)\n", " (start_norm): Sequential(\n", - " (0): BatchNorm1d(27, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", " )\n", " (layers): Sequential(\n", - " (0): Linear(in_features=27, 
out_features=19, bias=True)\n", - " (1): BatchNorm1d(19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Sigmoid()\n", - " (3): Linear(in_features=19, out_features=19, bias=True)\n", + " (0): Linear(in_features=34, out_features=24, bias=True)\n", + " (1): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Linear(in_features=24, out_features=24, bias=True)\n", " )\n", - " (shake_shake_layers): Sequential(\n", - " (0): Linear(in_features=27, out_features=19, bias=True)\n", - " (1): BatchNorm1d(19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Sigmoid()\n", - " (3): Linear(in_features=19, out_features=19, bias=True)\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=24, out_features=24, bias=True)\n", + " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=24, out_features=24, bias=True)\n", + " )\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=24, out_features=24, bias=True)\n", + " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=24, out_features=24, bias=True)\n", " )\n", " )\n", " )\n", " (6): Sequential(\n", " (0): ResBlock(\n", - " (shortcut): Linear(in_features=19, out_features=11, bias=True)\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=24, out_features=24, bias=True)\n", + " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=24, out_features=24, bias=True)\n", + " )\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=24, out_features=24, bias=True)\n", + " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=24, out_features=24, bias=True)\n", + " )\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=24, out_features=24, bias=True)\n", + " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=24, out_features=24, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (7): Sequential(\n", + " (0): ResBlock(\n", + " (shortcut): Linear(in_features=24, out_features=14, bias=True)\n", " (start_norm): Sequential(\n", - " (0): BatchNorm1d(19, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", + " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", " )\n", " (layers): Sequential(\n", - " (0): Linear(in_features=19, out_features=11, bias=True)\n", - " (1): BatchNorm1d(11, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", - " (2): Sigmoid()\n", - " (3): Linear(in_features=11, out_features=11, bias=True)\n", + " (0): Linear(in_features=24, out_features=14, bias=True)\n", + " (1): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Linear(in_features=14, out_features=14, bias=True)\n", " )\n", - " (shake_shake_layers): Sequential(\n", - " (0): Linear(in_features=19, out_features=11, bias=True)\n", - " (1): BatchNorm1d(11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): Sigmoid()\n", - " (3): Linear(in_features=11, out_features=11, bias=True)\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=14, out_features=14, bias=True)\n", + " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " )\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=14, out_features=14, bias=True)\n", + " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=14, out_features=14, bias=True)\n", " )\n", " )\n", " )\n", - " (7): BatchNorm1d(11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (8): Sigmoid()\n", - " (9): Linear(in_features=11, out_features=2, bias=True)\n", + " (8): Sequential(\n", + " (0): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=14, out_features=14, bias=True)\n", + " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " )\n", + " )\n", + " (1): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=14, out_features=14, bias=True)\n", + " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " )\n", + " )\n", + " (2): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=14, out_features=14, bias=True)\n", + " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " )\n", + " )\n", + " )\n", + " (9): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (10): ReLU()\n", + " (11): Linear(in_features=14, out_features=2, bias=True)\n", ")\n" ] } @@ -621,9 +583,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Could not find BOHB_Multi_KDE, replacing with object\n" + ] + } + ], "source": [ "# Load classes\n", "autonet_image_classification = 
AutoNetImageClassification(config_preset=\"full_cs\", result_logger_dir=\"logs/\")\n", @@ -634,19 +604,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "For passing your image data to fit, your have two options:\n", + "For passing your image data, you have two options (note that arrays are expected):\n", "\n", - "I) Via path to a comma-separated value file, which contains the paths to the images and the image labels (note header is assumed to be None):" + "I) Via a path to a comma-separated value file, which in turn contains the paths to the images and the image labels (note header is assumed to be None):" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ - "import os\n", - "\n", "csv_dir = os.path.abspath(\"../../datasets/example.csv\")\n", "\n", "X_train = np.array([csv_dir])\n", @@ -662,12 +630,10 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "import pandas as pd\n", - "\n", "df = pd.read_csv(csv_dir, header=None)\n", "X_train = df.values[:,0]\n", "Y_train = df.values[:,1]" @@ -688,18 +654,19 @@ "cell_type": "code", "execution_count": null, "metadata": { - "scrolled": false + "scrolled": true }, "outputs": [], "source": [ "autonet_image_classification.fit(X_train=X_train,\n", " Y_train=Y_train,\n", " images_shape=[3,32,32],\n", - " min_budget=900,\n", - " max_budget=1200,\n", - " max_runtime=3000,\n", + " min_budget=100,\n", + " max_budget=300,\n", + " max_runtime=600,\n", " save_checkpoints=True,\n", - " images_root_folders=[os.path.abspath(\"../../datasets/example_images\")])" + " images_root_folders=[os.path.abspath(\"../../datasets/example_images\")],\n", + " log_level=\"info\")" ] }, { @@ -711,19 +678,235 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 6, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[INFO] [11:32:27:autonet] Start autonet with config:\n", + "{'additional_logs': [],\n", + " 'additional_metrics': [],\n", + " 'algorithm': 'bohb',\n", + " 'batch_loss_computation_techniques': ['standard', 'mixup'],\n", + " 'budget_type': 'time',\n", + " 'cuda': True,\n", + " 'cv_splits': 1,\n", + " 'dataloader_cache_size_mb': 0,\n", + " 'dataloader_worker': 1,\n", + " 'dataset_order': None,\n", + " 'default_dataset_download_dir': './datasets',\n", + " 'eta': 3,\n", + " 'evaluate_on_train_data': True,\n", + " 'file_extensions': ['.png', '.jpg', '.JPEG', '.pgm'],\n", + " 'final_activation': 'softmax',\n", + " 'global_results_dir': None,\n", + " 'half_num_cv_splits_below_budget': 0,\n", + " 'hyperparameter_search_space_updates': None,\n", + " 'images_root_folders': ['./datasets'],\n", + " 'images_shape': [3, 32, 32],\n", + " 'increase_number_of_trained_datasets': False,\n", + " 'keep_only_incumbent_checkpoints': True,\n", + " 'log_level': 'info',\n", + " 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'],\n", + " 'lr_scheduler': ['cosine_annealing',\n", + " 'cyclic',\n", + " 'step',\n", + " 'adapt',\n", + " 'plateau',\n", + " 'alternating_cosine',\n", + " 'exponential',\n", + " 'none'],\n", + " 'max_budget': 900,\n", + " 'max_class_size': None,\n", + " 'max_runtime': 1800,\n", + " 'memory_limit_mb': 1000000,\n", + " 'min_budget': 600,\n", + " 'min_budget_for_cv': 0,\n", + " 'min_workers': 1,\n", + " 'minimize': False,\n", + " 'network_interface_name': 'eth0',\n", + " 'networks': 
['densenet',\n", + " 'densenet_flexible',\n", + " 'resnet',\n", + " 'resnet152',\n", + " 'darts',\n", + " 'mobilenet'],\n", + " 'num_iterations': inf,\n", + " 'optimize_metric': 'accuracy',\n", + " 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'],\n", + " 'random_seed': 2214195982,\n", + " 'result_logger_dir': 'logs/',\n", + " 'run_id': '0',\n", + " 'save_checkpoints': False,\n", + " 'shuffle': True,\n", + " 'task_id': -1,\n", + " 'tensorboard_images_count': 0,\n", + " 'tensorboard_min_log_interval': 30,\n", + " 'use_stratified_cv_split': True,\n", + " 'use_tensorboard_logger': False,\n", + " 'validation_split': 0.0,\n", + " 'working_dir': '.'}\n", + "[INFO] [11:32:27:autonet] [AutoNet] Start bohb\n", + "[INFO] [11:32:27:hpbandster.run_0.worker.mlgpu14.4500.-1] WORKER: start listening for jobs\n", + "[INFO] [11:32:27:hpbandster] DISPATCHER: started the 'discover_worker' thread\n", + "[INFO] [11:32:27:hpbandster] DISPATCHER: started the 'job_runner' thread\n", + "[INFO] [11:32:27:hpbandster] DISPATCHER: Pyro daemon running on 10.5.166.105:43149\n", + "[INFO] [11:32:27:hpbandster] DISPATCHER: discovered new worker, hpbandster.run_0.worker.mlgpu14.4500.-1139990687835968\n", + "[INFO] [11:32:27:hpbandster] HBMASTER: adjusted queue size to (0, 1)\n", + "[INFO] [11:32:27:hpbandster] HBMASTER: starting run at 1570613547.6513581\n", + "[INFO] [11:32:27:hpbandster.run_0.worker.mlgpu14.4500.-1] WORKER: start processing job (0, 0, 0)\n", + "[INFO] [11:32:27:autonet] Fit optimization pipeline\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 50000\n", + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[INFO] [11:47:45:autonet] Finished train with budget 900.0s, Training took 900s, Wrap up took 0s, Init took 0s, Train took 900s, Validation took 0s, Log functions took 0s, Cumulative time 900s.\n", + "Total time consumption in s: 900\n", + "[INFO] [11:47:46:autonet] Training ['resnet152'] with budget 900.0 resulted in score: -59.46610467158413 took 918.7896795272827 seconds\n", + "[INFO] [11:47:46:hpbandster.run_0.worker.mlgpu14.4500.-1] WORKER: registered result for job (0, 0, 0) with dispatcher\n", + "[INFO] [11:47:46:hpbandster] HBMASTER: Timelimit reached: wait for remaining 0 jobs\n", + "[INFO] [11:47:46:hpbandster] DISPATCHER: Dispatcher shutting down\n", + "[INFO] [11:47:46:hpbandster] DISPATCHER: shut down complete\n", + "[INFO] [11:47:47:autonet] Start autonet with config:\n", + "{'additional_logs': [],\n", + " 'additional_metrics': [],\n", + " 'algorithm': 'bohb',\n", + " 'batch_loss_computation_techniques': ['standard', 'mixup'],\n", + " 'budget_type': 'time',\n", + " 'cuda': True,\n", + " 'cv_splits': 1,\n", + " 'dataloader_cache_size_mb': 0,\n", + " 'dataloader_worker': 1,\n", + " 'dataset_order': None,\n", + " 'default_dataset_download_dir': './datasets',\n", + " 'eta': 3,\n", + " 'evaluate_on_train_data': True,\n", + " 'file_extensions': ['.png', '.jpg', '.JPEG', '.pgm'],\n", + " 'final_activation': 'softmax',\n", + " 'global_results_dir': None,\n", + " 'half_num_cv_splits_below_budget': 0,\n", + " 'hyperparameter_search_space_updates': None,\n", + " 'images_root_folders': ['./datasets'],\n", + " 'images_shape': [3, 32, 32],\n", + " 'increase_number_of_trained_datasets': False,\n", + " 'keep_only_incumbent_checkpoints': True,\n", + " 'log_level': 'info',\n", + " 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'],\n", + " 'lr_scheduler': 
['cosine_annealing',\n", + " 'cyclic',\n", + " 'step',\n", + " 'adapt',\n", + " 'plateau',\n", + " 'alternating_cosine',\n", + " 'exponential',\n", + " 'none'],\n", + " 'max_budget': 900,\n", + " 'max_class_size': None,\n", + " 'max_runtime': 1800,\n", + " 'memory_limit_mb': 1000000,\n", + " 'min_budget': 600,\n", + " 'min_budget_for_cv': 0,\n", + " 'min_workers': 1,\n", + " 'minimize': False,\n", + " 'network_interface_name': 'eth0',\n", + " 'networks': ['densenet',\n", + " 'densenet_flexible',\n", + " 'resnet',\n", + " 'resnet152',\n", + " 'darts',\n", + " 'mobilenet'],\n", + " 'num_iterations': inf,\n", + " 'optimize_metric': 'accuracy',\n", + " 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'],\n", + " 'random_seed': 2214195982,\n", + " 'result_logger_dir': 'logs/',\n", + " 'run_id': '0',\n", + " 'save_checkpoints': False,\n", + " 'shuffle': True,\n", + " 'task_id': -1,\n", + " 'tensorboard_images_count': 0,\n", + " 'tensorboard_min_log_interval': 30,\n", + " 'use_stratified_cv_split': True,\n", + " 'use_tensorboard_logger': False,\n", + " 'validation_split': 0.0,\n", + " 'working_dir': '.'}\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 50000\n", + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[INFO] [12:03:05:autonet] Finished train with budget 900.0s, Training took 900s, Wrap up took 0s, Init took 0s, Train took 900s, Validation took 0s, Log functions took 0s, Cumulative time 900s.\n", + "Total time consumption in s: 900\n" + ] + }, + { + "data": { + "text/plain": [ + "{'loss': -59.46610467158413,\n", + " 'optimized_hyperparameter_config': {'NetworkSelectorDatasetInfo:darts:auxiliary': False,\n", + " 'NetworkSelectorDatasetInfo:darts:drop_path_prob': 0.1,\n", + " 'NetworkSelectorDatasetInfo:darts:init_channels': 36,\n", + " 'NetworkSelectorDatasetInfo:darts:layers': 20,\n", + " 'CreateImageDataLoader:batch_size': 73,\n", + " 'ImageAugmentation:augment': False,\n", + " 'ImageAugmentation:cutout': False,\n", + " 'LossModuleSelectorIndices:loss_module': 'cross_entropy_weighted',\n", + " 'NetworkSelectorDatasetInfo:network': 'resnet152',\n", + " 'OptimizerSelector:optimizer': 'sgd',\n", + " 'SimpleLearningrateSchedulerSelector:lr_scheduler': 'adapt',\n", + " 'SimpleTrainNode:batch_loss_computation_technique': 'standard',\n", + " 'OptimizerSelector:sgd:learning_rate': 0.005042807661492666,\n", + " 'OptimizerSelector:sgd:momentum': 0.1748329156598709,\n", + " 'OptimizerSelector:sgd:weight_decay': 0.07558471538402955,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:T_max': 563,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:T_mult': 1.879929125554272,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:patience': 2,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:threshold': 0.43953248007742884},\n", + " 'budget': 900.0,\n", + " 'info': {}}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "path_to_cifar_csv = os.path.abspath(\"../../datasets/CIFAR10.csv\")\n", "\n", "autonet_image_classification.fit(X_train=np.array([path_to_cifar_csv]),\n", " Y_train=np.array([0]),\n", - " min_budget=900,\n", - " max_budget=1200,\n", - " max_runtime=3000,\n", + " min_budget=600,\n", + " max_budget=900,\n", + " max_runtime=1800,\n", " default_dataset_download_dir=\"./datasets\",\n", - " images_root_folders=[\"./datasets\"])" + " images_root_folders=[\"./datasets\"],\n", + " log_level=\"info\")" 
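Regarding option I above (passing image data via a CSV read with header=None): each row is expected to hold an image path that resolves under images_root_folders, followed by its label. A minimal sketch of writing such a file; the file and image names below are hypothetical placeholders, not files shipped with the repository.

import pandas as pd

# Hypothetical rows; the paths must exist under the configured images_root_folders.
rows = [("img_0001.png", 0), ("img_0002.png", 1)]
pd.DataFrame(rows).to_csv("my_dataset.csv", header=False, index=False)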
] }, { @@ -766,7 +949,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.6.8" } }, "nbformat": 4, From 3d9ff6e5a9470c434f5c13d6300bbdffbdefe7c6 Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 12:52:30 +0200 Subject: [PATCH 09/13] Added help for Configoptions --- examples/basics/Auto-PyTorch Tutorial.ipynb | 377 +++++++++----------- 1 file changed, 175 insertions(+), 202 deletions(-) diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index fdf760344..686eff1e1 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -173,7 +173,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -194,6 +194,17 @@ "The Box-Cox transformation can only be applied to strictly positive data\n", "Using yeo-johnson instead\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[INFO] [12:47:04:autonet] Finished train with budget 50.0: Preprocessing took 0s, Training took 33s, Wrap up took 0s. Total time consumption in s: 33\n", + "[INFO] [12:47:04:autonet] [AutoNet] Done with current split!\n", + "[INFO] [12:47:04:autonet] Aggregate the results across the splits\n", + "[INFO] [12:47:04:autonet] Process 1 additional result(s)\n", + "[INFO] [12:47:04:autonet] Done Refitting\n" + ] } ], "source": [ @@ -233,7 +244,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -243,14 +254,14 @@ "Model prediction: [[0.]\n", " [0.]\n", " [0.]\n", + " [1.]\n", " [0.]\n", " [0.]\n", - " [0.]\n", - " [0.]\n", - " [0.]\n", + " [1.]\n", + " [1.]\n", " [0.]\n", " [0.]]\n", - "Accuracy score 67.0\n" + "Accuracy score 80.0\n" ] } ], @@ -272,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 12, "metadata": { "scrolled": true }, @@ -282,278 +293,240 @@ "output_type": "stream", "text": [ "Sequential(\n", - " (0): Linear(in_features=20, out_features=54, bias=True)\n", + " (0): Linear(in_features=20, out_features=276, bias=True)\n", " (1): Sequential(\n", " (0): ResBlock(\n", - " (shortcut): Linear(in_features=54, out_features=44, bias=True)\n", - " (start_norm): Sequential(\n", - " (0): BatchNorm1d(54, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " )\n", " (layers): Sequential(\n", - " (0): Linear(in_features=54, out_features=44, bias=True)\n", - " (1): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU()\n", - " (3): Linear(in_features=44, out_features=44, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.36734979590227845, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=44, out_features=44, bias=True)\n", - " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=44, out_features=44, 
bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.36734979590227845, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (2): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=44, out_features=44, bias=True)\n", - " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=44, out_features=44, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.36734979590227845, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " )\n", + " )\n", + " (3): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.36734979590227845, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " )\n", " (2): Sequential(\n", " (0): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=44, out_features=44, bias=True)\n", - " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=44, out_features=44, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.7346995918045569, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=44, out_features=44, bias=True)\n", - " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=44, out_features=44, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.7346995918045569, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (2): ResBlock(\n", " (layers): 
Sequential(\n", - " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=44, out_features=44, bias=True)\n", - " (3): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=44, out_features=44, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.7346995918045569, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " )\n", + " )\n", + " (3): ResBlock(\n", + " (layers): Sequential(\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.7346995918045569, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " )\n", " (3): Sequential(\n", " (0): ResBlock(\n", - " (shortcut): Linear(in_features=44, out_features=34, bias=True)\n", - " (start_norm): Sequential(\n", - " (0): BatchNorm1d(44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " )\n", " (layers): Sequential(\n", - " (0): Linear(in_features=44, out_features=34, bias=True)\n", - " (1): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU()\n", - " (3): Linear(in_features=34, out_features=34, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.4900446277336395, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=34, out_features=34, bias=True)\n", - " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=34, out_features=34, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.4900446277336395, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (2): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=34, out_features=34, bias=True)\n", - " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=34, out_features=34, bias=True)\n", - " )\n", - 
" )\n", - " )\n", - " (4): Sequential(\n", - " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=34, out_features=34, bias=True)\n", - " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=34, out_features=34, bias=True)\n", - " )\n", - " )\n", - " (1): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=34, out_features=34, bias=True)\n", - " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=34, out_features=34, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.4900446277336395, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", - " (2): ResBlock(\n", + " (3): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=34, out_features=34, bias=True)\n", - " (3): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=34, out_features=34, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.4900446277336395, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " )\n", - " (5): Sequential(\n", + " (4): Sequential(\n", " (0): ResBlock(\n", - " (shortcut): Linear(in_features=34, out_features=24, bias=True)\n", - " (start_norm): Sequential(\n", - " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " )\n", " (layers): Sequential(\n", - " (0): Linear(in_features=34, out_features=24, bias=True)\n", - " (1): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (2): ReLU()\n", - " (3): Linear(in_features=24, out_features=24, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.24538966366272202, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=24, out_features=24, bias=True)\n", - " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): 
Linear(in_features=24, out_features=24, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.24538966366272202, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (2): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=24, out_features=24, bias=True)\n", - " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=24, out_features=24, bias=True)\n", - " )\n", - " )\n", - " )\n", - " (6): Sequential(\n", - " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=24, out_features=24, bias=True)\n", - " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=24, out_features=24, bias=True)\n", - " )\n", - " )\n", - " (1): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=24, out_features=24, bias=True)\n", - " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=24, out_features=24, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.24538966366272202, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", - " (2): ResBlock(\n", + " (3): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=24, out_features=24, bias=True)\n", - " (3): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=24, out_features=24, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.24538966366272202, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " )\n", - " (7): Sequential(\n", + " (5): Sequential(\n", " (0): ResBlock(\n", - " (shortcut): Linear(in_features=24, out_features=14, bias=True)\n", - " (start_norm): Sequential(\n", - " (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " )\n", " (layers): Sequential(\n", - " (0): Linear(in_features=24, out_features=14, bias=True)\n", - " (1): BatchNorm1d(14, eps=1e-05, momentum=0.1, 
affine=True, track_running_stats=True)\n", - " (2): ReLU()\n", - " (3): Linear(in_features=14, out_features=14, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.0, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=14, out_features=14, bias=True)\n", - " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.0, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " (2): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=14, out_features=14, bias=True)\n", - " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=14, out_features=14, bias=True)\n", - " )\n", - " )\n", - " )\n", - " (8): Sequential(\n", - " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=14, out_features=14, bias=True)\n", - " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.0, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", - " (1): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=14, out_features=14, bias=True)\n", - " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=14, out_features=14, bias=True)\n", - " )\n", - " )\n", - " (2): ResBlock(\n", + " (3): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): ReLU()\n", - " (2): Linear(in_features=14, out_features=14, bias=True)\n", - " (3): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): ReLU()\n", - " (5): Linear(in_features=14, out_features=14, bias=True)\n", + " (0): BatchNorm1d(276, 
eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): Sigmoid()\n", + " (2): Linear(in_features=276, out_features=276, bias=True)\n", + " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): Sigmoid()\n", + " (5): Dropout(p=0.0, inplace=False)\n", + " (6): Linear(in_features=276, out_features=276, bias=True)\n", " )\n", " )\n", " )\n", - " (9): BatchNorm1d(14, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (10): ReLU()\n", - " (11): Linear(in_features=14, out_features=2, bias=True)\n", + " (6): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (7): Sigmoid()\n", + " (8): Linear(in_features=276, out_features=2, bias=True)\n", ")\n" ] } From 18ad0d220e52557217b4e9332b8ccbaaa7f5230c Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 14:17:13 +0200 Subject: [PATCH 10/13] Notebook output added, small fixes --- examples/basics/Auto-PyTorch Tutorial.ipynb | 613 ++++++-------------- examples/basics/__init__.py | 0 examples/basics/ensemble.py | 4 +- 3 files changed, 177 insertions(+), 440 deletions(-) delete mode 100644 examples/basics/__init__.py diff --git a/examples/basics/Auto-PyTorch Tutorial.ipynb b/examples/basics/Auto-PyTorch Tutorial.ipynb index 686eff1e1..c0e6887eb 100644 --- a/examples/basics/Auto-PyTorch Tutorial.ipynb +++ b/examples/basics/Auto-PyTorch Tutorial.ipynb @@ -24,7 +24,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -37,7 +37,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -58,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -74,7 +74,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "metadata": { "scrolled": true }, @@ -103,7 +103,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "metadata": { "scrolled": true }, @@ -132,20 +132,11 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The Box-Cox transformation can only be applied to strictly positive data\n", - "Using yeo-johnson instead\n" - ] - } - ], + "outputs": [], "source": [ "autonet = AutoNetClassification(config_preset=\"tiny_cs\", result_logger_dir=\"logs/\")\n", "# Fit (note that the settings are for demonstration, you might need larger budgets)\n", @@ -173,37 +164,23 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "[INFO] [12:46:30:autonet] Start autonet with config:\n", - "{'embeddings': ['none'], 'lr_scheduler': ['cosine_annealing', 'plateau'], 'networks': ['shapedresnet'], 'over_sampling_methods': ['smote'], 'preprocessors': ['none', 'truncated_svd', 'power_transformer'], 'target_size_strategies': ['none', 'upsample', 'median'], 'result_logger_dir': 'logs/', 'budget_type': 'epochs', 'log_level': 'info', 'use_tensorboard_logger': True, 'validation_split': 0.0, 'hyperparameter_search_space_updates': None, 'categorical_features': None, 'dataset_name': None, 'run_id': '0', 'task_id': -1, 'algorithm': 'bohb', 'eta': 3, 'min_workers': 1, 'working_dir': '.', 'network_interface_name': 'eth0', 'memory_limit_mb': 1000000, 
'run_worker_on_master_node': True, 'use_pynisher': True, 'refit_validation_split': 0.0, 'cross_validator': 'none', 'cross_validator_args': {}, 'min_budget_for_cv': 0, 'shuffle': True, 'imputation_strategies': ['mean', 'median', 'most_frequent'], 'normalization_strategies': ['none', 'minmax', 'standardize', 'maxabs'], 'under_sampling_methods': ['none', 'random'], 'final_activation': 'softmax', 'initialization_methods': ['default', 'sparse'], 'initializer': 'simple_initializer', 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'], 'additional_logs': [], 'optimize_metric': 'accuracy', 'additional_metrics': [], 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'], 'batch_loss_computation_techniques': ['standard', 'mixup'], 'cuda': True, 'torch_num_threads': 1, 'full_eval_each_epoch': False, 'best_over_epochs': False, 'early_stopping_patience': inf, 'early_stopping_reset_parameters': False, 'random_seed': 930567008, 'min_budget': 5, 'max_budget': 150, 'max_runtime': inf, 'num_iterations': 4, 'cv_splits': 1, 'increase_number_of_trained_datasets': False}\n", - "[INFO] [12:46:30:autonet] Start Refitting\n", - "[INFO] [12:46:30:autonet] [AutoNet] No validation set given and either no cross validator given or budget too low for CV. Continue by splitting 0 of training data.\n", - "[INFO] [12:46:30:autonet] [AutoNet] CV split 0 of 1\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The Box-Cox transformation can only be applied to strictly positive data\n", - "Using yeo-johnson instead\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] [12:47:04:autonet] Finished train with budget 50.0: Preprocessing took 0s, Training took 33s, Wrap up took 0s. Total time consumption in s: 33\n", - "[INFO] [12:47:04:autonet] [AutoNet] Done with current split!\n", - "[INFO] [12:47:04:autonet] Aggregate the results across the splits\n", - "[INFO] [12:47:04:autonet] Process 1 additional result(s)\n", - "[INFO] [12:47:04:autonet] Done Refitting\n" + "[INFO] [13:07:21:autonet] Start autonet with config:\n", + "{'embeddings': ['none'], 'lr_scheduler': ['cosine_annealing', 'plateau'], 'networks': ['shapedresnet'], 'over_sampling_methods': ['smote'], 'preprocessors': ['none', 'truncated_svd', 'power_transformer'], 'target_size_strategies': ['none', 'upsample', 'median'], 'result_logger_dir': 'logs/', 'budget_type': 'epochs', 'log_level': 'info', 'use_tensorboard_logger': True, 'validation_split': 0.0, 'hyperparameter_search_space_updates': None, 'categorical_features': None, 'dataset_name': None, 'run_id': '0', 'task_id': -1, 'algorithm': 'bohb', 'eta': 3, 'min_workers': 1, 'working_dir': '.', 'network_interface_name': 'eth0', 'memory_limit_mb': 1000000, 'run_worker_on_master_node': True, 'use_pynisher': True, 'refit_validation_split': 0.0, 'cross_validator': 'none', 'cross_validator_args': {}, 'min_budget_for_cv': 0, 'shuffle': True, 'imputation_strategies': ['mean', 'median', 'most_frequent'], 'normalization_strategies': ['none', 'minmax', 'standardize', 'maxabs'], 'under_sampling_methods': ['none', 'random'], 'final_activation': 'softmax', 'initialization_methods': ['default', 'sparse'], 'initializer': 'simple_initializer', 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'], 'additional_logs': [], 'optimize_metric': 'accuracy', 'additional_metrics': [], 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'], 'batch_loss_computation_techniques': ['standard', 'mixup'], 'cuda': True, 'torch_num_threads': 1, 'full_eval_each_epoch': False, 'best_over_epochs': 
False, 'early_stopping_patience': inf, 'early_stopping_reset_parameters': False, 'random_seed': 1103059814, 'min_budget': 5, 'max_budget': 150, 'max_runtime': inf, 'num_iterations': 4, 'cv_splits': 1, 'increase_number_of_trained_datasets': False}\n", + "[INFO] [13:07:21:autonet] Start Refitting\n", + "[INFO] [13:07:21:autonet] [AutoNet] No validation set given and either no cross validator given or budget too low for CV. Continue by splitting 0 of training data.\n", + "[INFO] [13:07:21:autonet] [AutoNet] CV split 0 of 1\n", + "[INFO] [13:07:25:autonet] Finished train with budget 50.0: Preprocessing took 0s, Training took 4s, Wrap up took 0s. Total time consumption in s: 4\n", + "[INFO] [13:07:25:autonet] [AutoNet] Done with current split!\n", + "[INFO] [13:07:25:autonet] Aggregate the results across the splits\n", + "[INFO] [13:07:25:autonet] Process 1 additional result(s)\n", + "[INFO] [13:07:25:autonet] Done Refitting\n" ] } ], @@ -244,7 +221,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -254,14 +231,14 @@ "Model prediction: [[0.]\n", " [0.]\n", " [0.]\n", - " [1.]\n", " [0.]\n", " [0.]\n", - " [1.]\n", - " [1.]\n", " [0.]\n", - " [0.]]\n", - "Accuracy score 80.0\n" + " [0.]\n", + " [0.]\n", + " [0.]\n", + " [1.]]\n", + "Accuracy score 74.0\n" ] } ], @@ -283,9 +260,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 9, "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [ { @@ -293,240 +270,91 @@ "output_type": "stream", "text": [ "Sequential(\n", - " (0): Linear(in_features=20, out_features=276, bias=True)\n", + " (0): Linear(in_features=20, out_features=34, bias=True)\n", " (1): Sequential(\n", " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.36734979590227845, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (shortcut): Linear(in_features=34, out_features=48, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(34, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", " )\n", - " )\n", - " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.36734979590227845, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (0): Linear(in_features=34, out_features=48, bias=True)\n", + " (1): BatchNorm1d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Dropout(p=0.4477955154159557, inplace=False)\n", + " (4): Linear(in_features=48, out_features=48, bias=True)\n", " )\n", " )\n", - " (2): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.36734979590227845, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (3): ResBlock(\n", + " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.36734979590227845, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (0): BatchNorm1d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=48, out_features=48, bias=True)\n", + " (3): BatchNorm1d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Dropout(p=0.4477955154159557, inplace=False)\n", + " (6): Linear(in_features=48, out_features=48, bias=True)\n", " )\n", " )\n", " )\n", " (2): Sequential(\n", " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.7346995918045569, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (1): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.7346995918045569, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (shortcut): Linear(in_features=48, out_features=62, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", " )\n", - " )\n", - " (2): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.7346995918045569, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (0): Linear(in_features=48, out_features=62, bias=True)\n", + " (1): BatchNorm1d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Dropout(p=0.22389775770797785, inplace=False)\n", + " (4): Linear(in_features=62, out_features=62, bias=True)\n", " )\n", " )\n", - " (3): ResBlock(\n", + " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): 
Dropout(p=0.7346995918045569, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (0): BatchNorm1d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=62, out_features=62, bias=True)\n", + " (3): BatchNorm1d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", + " (5): Dropout(p=0.22389775770797785, inplace=False)\n", + " (6): Linear(in_features=62, out_features=62, bias=True)\n", " )\n", " )\n", " )\n", " (3): Sequential(\n", " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.4900446277336395, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (shortcut): Linear(in_features=62, out_features=79, bias=True)\n", + " (start_norm): Sequential(\n", + " (0): BatchNorm1d(62, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", " )\n", - " )\n", - " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.4900446277336395, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (2): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.4900446277336395, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (3): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.4900446277336395, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " )\n", - " (4): Sequential(\n", - " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.24538966366272202, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (0): Linear(in_features=62, out_features=79, bias=True)\n", + " (1): BatchNorm1d(79, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (2): ReLU()\n", + " (3): Dropout(p=0.0, inplace=False)\n", + " (4): Linear(in_features=79, 
out_features=79, bias=True)\n", " )\n", " )\n", " (1): ResBlock(\n", " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.24538966366272202, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (2): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.24538966366272202, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (3): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.24538966366272202, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " )\n", - " (5): Sequential(\n", - " (0): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.0, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (1): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.0, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (2): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", - " (5): Dropout(p=0.0, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", - " )\n", - " )\n", - " (3): ResBlock(\n", - " (layers): Sequential(\n", - " (0): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (1): Sigmoid()\n", - " (2): Linear(in_features=276, out_features=276, bias=True)\n", - " (3): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (4): Sigmoid()\n", + " (0): BatchNorm1d(79, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=79, out_features=79, bias=True)\n", + " 
(3): BatchNorm1d(79, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (4): ReLU()\n", " (5): Dropout(p=0.0, inplace=False)\n", - " (6): Linear(in_features=276, out_features=276, bias=True)\n", + " (6): Linear(in_features=79, out_features=79, bias=True)\n", " )\n", " )\n", " )\n", - " (6): BatchNorm1d(276, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", - " (7): Sigmoid()\n", - " (8): Linear(in_features=276, out_features=2, bias=True)\n", + " (4): BatchNorm1d(79, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (5): ReLU()\n", + " (6): Linear(in_features=79, out_features=2, bias=True)\n", ")\n" ] } @@ -556,7 +384,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -625,21 +453,62 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": { - "scrolled": true + "scrolled": false }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "{'loss': -84.48275862068965,\n", + " 'optimized_hyperparameter_config': {'NetworkSelectorDatasetInfo:darts:auxiliary': False,\n", + " 'NetworkSelectorDatasetInfo:darts:drop_path_prob': 0.1,\n", + " 'NetworkSelectorDatasetInfo:darts:init_channels': 36,\n", + " 'NetworkSelectorDatasetInfo:darts:layers': 20,\n", + " 'CreateImageDataLoader:batch_size': 58,\n", + " 'ImageAugmentation:augment': True,\n", + " 'ImageAugmentation:cutout': True,\n", + " 'LossModuleSelectorIndices:loss_module': 'cross_entropy',\n", + " 'NetworkSelectorDatasetInfo:network': 'densenet',\n", + " 'OptimizerSelector:optimizer': 'adam',\n", + " 'SimpleLearningrateSchedulerSelector:lr_scheduler': 'adapt',\n", + " 'SimpleTrainNode:batch_loss_computation_technique': 'standard',\n", + " 'ImageAugmentation:autoaugment': True,\n", + " 'ImageAugmentation:cutout_holes': 2,\n", + " 'ImageAugmentation:fastautoaugment': True,\n", + " 'ImageAugmentation:length': 17,\n", + " 'NetworkSelectorDatasetInfo:densenet:blocks': 4,\n", + " 'NetworkSelectorDatasetInfo:densenet:growth_rate': 28,\n", + " 'NetworkSelectorDatasetInfo:densenet:layer_in_block_1': 8,\n", + " 'NetworkSelectorDatasetInfo:densenet:layer_in_block_2': 16,\n", + " 'NetworkSelectorDatasetInfo:densenet:layer_in_block_3': 49,\n", + " 'NetworkSelectorDatasetInfo:densenet:use_dropout': False,\n", + " 'OptimizerSelector:adam:learning_rate': 0.00012377327234853046,\n", + " 'OptimizerSelector:adam:weight_decay': 0.06147134718475827,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:T_max': 450,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:T_mult': 1.189428111774201,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:patience': 4,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:threshold': 0.02209366315824298,\n", + " 'NetworkSelectorDatasetInfo:densenet:layer_in_block_4': 63},\n", + " 'budget': 400.0,\n", + " 'info': {}}" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "autonet_image_classification.fit(X_train=X_train,\n", " Y_train=Y_train,\n", " images_shape=[3,32,32],\n", - " min_budget=100,\n", - " max_budget=300,\n", + " min_budget=200,\n", + " max_budget=400,\n", " max_runtime=600,\n", " save_checkpoints=True,\n", - " images_root_folders=[os.path.abspath(\"../../datasets/example_images\")],\n", - " log_level=\"info\")" + " images_root_folders=[os.path.abspath(\"../../datasets/example_images\")])" ] }, { @@ -651,220 +520,90 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": 
{ - "scrolled": true + "scrolled": false }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] [11:32:27:autonet] Start autonet with config:\n", - "{'additional_logs': [],\n", - " 'additional_metrics': [],\n", - " 'algorithm': 'bohb',\n", - " 'batch_loss_computation_techniques': ['standard', 'mixup'],\n", - " 'budget_type': 'time',\n", - " 'cuda': True,\n", - " 'cv_splits': 1,\n", - " 'dataloader_cache_size_mb': 0,\n", - " 'dataloader_worker': 1,\n", - " 'dataset_order': None,\n", - " 'default_dataset_download_dir': './datasets',\n", - " 'eta': 3,\n", - " 'evaluate_on_train_data': True,\n", - " 'file_extensions': ['.png', '.jpg', '.JPEG', '.pgm'],\n", - " 'final_activation': 'softmax',\n", - " 'global_results_dir': None,\n", - " 'half_num_cv_splits_below_budget': 0,\n", - " 'hyperparameter_search_space_updates': None,\n", - " 'images_root_folders': ['./datasets'],\n", - " 'images_shape': [3, 32, 32],\n", - " 'increase_number_of_trained_datasets': False,\n", - " 'keep_only_incumbent_checkpoints': True,\n", - " 'log_level': 'info',\n", - " 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'],\n", - " 'lr_scheduler': ['cosine_annealing',\n", - " 'cyclic',\n", - " 'step',\n", - " 'adapt',\n", - " 'plateau',\n", - " 'alternating_cosine',\n", - " 'exponential',\n", - " 'none'],\n", - " 'max_budget': 900,\n", - " 'max_class_size': None,\n", - " 'max_runtime': 1800,\n", - " 'memory_limit_mb': 1000000,\n", - " 'min_budget': 600,\n", - " 'min_budget_for_cv': 0,\n", - " 'min_workers': 1,\n", - " 'minimize': False,\n", - " 'network_interface_name': 'eth0',\n", - " 'networks': ['densenet',\n", - " 'densenet_flexible',\n", - " 'resnet',\n", - " 'resnet152',\n", - " 'darts',\n", - " 'mobilenet'],\n", - " 'num_iterations': inf,\n", - " 'optimize_metric': 'accuracy',\n", - " 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'],\n", - " 'random_seed': 2214195982,\n", - " 'result_logger_dir': 'logs/',\n", - " 'run_id': '0',\n", - " 'save_checkpoints': False,\n", - " 'shuffle': True,\n", - " 'task_id': -1,\n", - " 'tensorboard_images_count': 0,\n", - " 'tensorboard_min_log_interval': 30,\n", - " 'use_stratified_cv_split': True,\n", - " 'use_tensorboard_logger': False,\n", - " 'validation_split': 0.0,\n", - " 'working_dir': '.'}\n", - "[INFO] [11:32:27:autonet] [AutoNet] Start bohb\n", - "[INFO] [11:32:27:hpbandster.run_0.worker.mlgpu14.4500.-1] WORKER: start listening for jobs\n", - "[INFO] [11:32:27:hpbandster] DISPATCHER: started the 'discover_worker' thread\n", - "[INFO] [11:32:27:hpbandster] DISPATCHER: started the 'job_runner' thread\n", - "[INFO] [11:32:27:hpbandster] DISPATCHER: Pyro daemon running on 10.5.166.105:43149\n", - "[INFO] [11:32:27:hpbandster] DISPATCHER: discovered new worker, hpbandster.run_0.worker.mlgpu14.4500.-1139990687835968\n", - "[INFO] [11:32:27:hpbandster] HBMASTER: adjusted queue size to (0, 1)\n", - "[INFO] [11:32:27:hpbandster] HBMASTER: starting run at 1570613547.6513581\n", - "[INFO] [11:32:27:hpbandster.run_0.worker.mlgpu14.4500.-1] WORKER: start processing job (0, 0, 0)\n", - "[INFO] [11:32:27:autonet] Fit optimization pipeline\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "0 50000\n", - "Files already downloaded and verified\n", - "Files already downloaded and verified\n" + "Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ./datasets/cifar-10-python.tar.gz\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "[INFO] [11:47:45:autonet] Finished train with budget 900.0s, Training 
took 900s, Wrap up took 0s, Init took 0s, Train took 900s, Validation took 0s, Log functions took 0s, Cumulative time 900s.\n", - "Total time consumption in s: 900\n", - "[INFO] [11:47:46:autonet] Training ['resnet152'] with budget 900.0 resulted in score: -59.46610467158413 took 918.7896795272827 seconds\n", - "[INFO] [11:47:46:hpbandster.run_0.worker.mlgpu14.4500.-1] WORKER: registered result for job (0, 0, 0) with dispatcher\n", - "[INFO] [11:47:46:hpbandster] HBMASTER: Timelimit reached: wait for remaining 0 jobs\n", - "[INFO] [11:47:46:hpbandster] DISPATCHER: Dispatcher shutting down\n", - "[INFO] [11:47:46:hpbandster] DISPATCHER: shut down complete\n", - "[INFO] [11:47:47:autonet] Start autonet with config:\n", - "{'additional_logs': [],\n", - " 'additional_metrics': [],\n", - " 'algorithm': 'bohb',\n", - " 'batch_loss_computation_techniques': ['standard', 'mixup'],\n", - " 'budget_type': 'time',\n", - " 'cuda': True,\n", - " 'cv_splits': 1,\n", - " 'dataloader_cache_size_mb': 0,\n", - " 'dataloader_worker': 1,\n", - " 'dataset_order': None,\n", - " 'default_dataset_download_dir': './datasets',\n", - " 'eta': 3,\n", - " 'evaluate_on_train_data': True,\n", - " 'file_extensions': ['.png', '.jpg', '.JPEG', '.pgm'],\n", - " 'final_activation': 'softmax',\n", - " 'global_results_dir': None,\n", - " 'half_num_cv_splits_below_budget': 0,\n", - " 'hyperparameter_search_space_updates': None,\n", - " 'images_root_folders': ['./datasets'],\n", - " 'images_shape': [3, 32, 32],\n", - " 'increase_number_of_trained_datasets': False,\n", - " 'keep_only_incumbent_checkpoints': True,\n", - " 'log_level': 'info',\n", - " 'loss_modules': ['cross_entropy', 'cross_entropy_weighted'],\n", - " 'lr_scheduler': ['cosine_annealing',\n", - " 'cyclic',\n", - " 'step',\n", - " 'adapt',\n", - " 'plateau',\n", - " 'alternating_cosine',\n", - " 'exponential',\n", - " 'none'],\n", - " 'max_budget': 900,\n", - " 'max_class_size': None,\n", - " 'max_runtime': 1800,\n", - " 'memory_limit_mb': 1000000,\n", - " 'min_budget': 600,\n", - " 'min_budget_for_cv': 0,\n", - " 'min_workers': 1,\n", - " 'minimize': False,\n", - " 'network_interface_name': 'eth0',\n", - " 'networks': ['densenet',\n", - " 'densenet_flexible',\n", - " 'resnet',\n", - " 'resnet152',\n", - " 'darts',\n", - " 'mobilenet'],\n", - " 'num_iterations': inf,\n", - " 'optimize_metric': 'accuracy',\n", - " 'optimizer': ['adam', 'adamw', 'sgd', 'rmsprop'],\n", - " 'random_seed': 2214195982,\n", - " 'result_logger_dir': 'logs/',\n", - " 'run_id': '0',\n", - " 'save_checkpoints': False,\n", - " 'shuffle': True,\n", - " 'task_id': -1,\n", - " 'tensorboard_images_count': 0,\n", - " 'tensorboard_min_log_interval': 30,\n", - " 'use_stratified_cv_split': True,\n", - " 'use_tensorboard_logger': False,\n", - " 'validation_split': 0.0,\n", - " 'working_dir': '.'}\n" + "100.0%" ] }, { "name": "stdout", "output_type": "stream", "text": [ + "Extracting ./datasets/cifar-10-python.tar.gz to ./datasets\n", + "Files already downloaded and verified\n", "0 50000\n", "Files already downloaded and verified\n", "Files already downloaded and verified\n" ] }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[INFO] [12:03:05:autonet] Finished train with budget 900.0s, Training took 900s, Wrap up took 0s, Init took 0s, Train took 900s, Validation took 0s, Log functions took 0s, Cumulative time 900s.\n", - "Total time consumption in s: 900\n" - ] - }, { "data": { "text/plain": [ - "{'loss': -59.46610467158413,\n", + "{'loss': -31.03144184934964,\n", " 
'optimized_hyperparameter_config': {'NetworkSelectorDatasetInfo:darts:auxiliary': False,\n", " 'NetworkSelectorDatasetInfo:darts:drop_path_prob': 0.1,\n", " 'NetworkSelectorDatasetInfo:darts:init_channels': 36,\n", " 'NetworkSelectorDatasetInfo:darts:layers': 20,\n", - " 'CreateImageDataLoader:batch_size': 73,\n", - " 'ImageAugmentation:augment': False,\n", + " 'CreateImageDataLoader:batch_size': 55,\n", + " 'ImageAugmentation:augment': True,\n", " 'ImageAugmentation:cutout': False,\n", - " 'LossModuleSelectorIndices:loss_module': 'cross_entropy_weighted',\n", - " 'NetworkSelectorDatasetInfo:network': 'resnet152',\n", + " 'LossModuleSelectorIndices:loss_module': 'cross_entropy',\n", + " 'NetworkSelectorDatasetInfo:network': 'darts',\n", " 'OptimizerSelector:optimizer': 'sgd',\n", " 'SimpleLearningrateSchedulerSelector:lr_scheduler': 'adapt',\n", - " 'SimpleTrainNode:batch_loss_computation_technique': 'standard',\n", - " 'OptimizerSelector:sgd:learning_rate': 0.005042807661492666,\n", - " 'OptimizerSelector:sgd:momentum': 0.1748329156598709,\n", - " 'OptimizerSelector:sgd:weight_decay': 0.07558471538402955,\n", - " 'SimpleLearningrateSchedulerSelector:adapt:T_max': 563,\n", - " 'SimpleLearningrateSchedulerSelector:adapt:T_mult': 1.879929125554272,\n", - " 'SimpleLearningrateSchedulerSelector:adapt:patience': 2,\n", - " 'SimpleLearningrateSchedulerSelector:adapt:threshold': 0.43953248007742884},\n", + " 'SimpleTrainNode:batch_loss_computation_technique': 'mixup',\n", + " 'ImageAugmentation:autoaugment': False,\n", + " 'ImageAugmentation:fastautoaugment': True,\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_0': 'avg_pool_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_1': 'dil_conv_5x5',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_0': 'avg_pool_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_1': 'dil_conv_5x5',\n", + " 'NetworkSelectorDatasetInfo:darts:inputs_node_normal_3': '1_2',\n", + " 'NetworkSelectorDatasetInfo:darts:inputs_node_normal_4': '0_2',\n", + " 'NetworkSelectorDatasetInfo:darts:inputs_node_normal_5': '3_4',\n", + " 'NetworkSelectorDatasetInfo:darts:inputs_node_reduce_3': '0_1',\n", + " 'NetworkSelectorDatasetInfo:darts:inputs_node_reduce_4': '0_1',\n", + " 'NetworkSelectorDatasetInfo:darts:inputs_node_reduce_5': '0_1',\n", + " 'OptimizerSelector:sgd:learning_rate': 0.056126455704317305,\n", + " 'OptimizerSelector:sgd:momentum': 0.2554615131836397,\n", + " 'OptimizerSelector:sgd:weight_decay': 0.0064933176160527645,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:T_max': 815,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:T_mult': 1.2809915617006586,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:patience': 3,\n", + " 'SimpleLearningrateSchedulerSelector:adapt:threshold': 0.36553138862173745,\n", + " 'SimpleTrainNode:mixup:alpha': 0.9499156113310157,\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_12': 'sep_conv_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_13': 'sep_conv_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_3': 'sep_conv_5x5',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_4': 'skip_connect',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_5': 'max_pool_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_normal_7': 'avg_pool_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_10': 'sep_conv_5x5',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_2': 'sep_conv_5x5',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_3': 'max_pool_3x3',\n", + " 
'NetworkSelectorDatasetInfo:darts:edge_reduce_5': 'avg_pool_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_6': 'sep_conv_3x3',\n", + " 'NetworkSelectorDatasetInfo:darts:edge_reduce_9': 'skip_connect'},\n", " 'budget': 900.0,\n", " 'info': {}}" ] }, - "execution_count": 6, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -878,8 +617,7 @@ " max_budget=900,\n", " max_runtime=1800,\n", " default_dataset_download_dir=\"./datasets\",\n", - " images_root_folders=[\"./datasets\"],\n", - " log_level=\"info\")" + " images_root_folders=[\"./datasets\"])" ] }, { @@ -901,8 +639,7 @@ " max_budget=2000,\n", " max_runtime=4000,\n", " default_dataset_download_dir=\"./datasets\",\n", - " images_root_folders=[\"./datasets\", \"./datasets/example_images\"],\n", - " log_level=\"info\")" + " images_root_folders=[\"./datasets\", \"./datasets/example_images\"])" ] } ], diff --git a/examples/basics/__init__.py b/examples/basics/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/examples/basics/ensemble.py b/examples/basics/ensemble.py index 93d70f31f..d52ed054f 100644 --- a/examples/basics/ensemble.py +++ b/examples/basics/ensemble.py @@ -11,7 +11,7 @@ dm = DataManager() dm.generate_classification(num_classes=3, num_features=21, num_samples=1500) -# Note: every parameter has a default value, you do not have to specify anything. The given parameter allow a fast test. +# Note: every parameter has a default value, you do not have to specify anything. The given parameters allow for a fast test. autonet = AutoNetEnsemble(AutoNetClassification, budget_type='epochs', min_budget=1, max_budget=9, num_iterations=1, log_level='debug') @@ -19,4 +19,4 @@ ensemble_only_consider_n_best=3) print(res) -print("Score:", autonet.score(X_test=dm.X_train, Y_test=dm.Y_train)) \ No newline at end of file +print("Score:", autonet.score(X_test=dm.X_train, Y_test=dm.Y_train)) From a8bc0ebf7eea375d6cc1f483b2d197f992b20269 Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 15:55:17 +0200 Subject: [PATCH 11/13] Added cross-entropy as optimization metric --- autoPyTorch/components/metrics/__init__.py | 3 +-- .../components/metrics/standard_metrics.py | 6 ++++++ .../autonet_feature_classification.py | 4 +++- .../autonet_image_classification.py | 13 ++++++++++-- .../bohb_multi_kde_ext.py | 20 ------------------- .../image_classification/medium_cs.txt | 2 +- .../medium_cs.txt | 2 +- .../optimization_algorithm_no_timelimit.py | 2 -- 8 files changed, 23 insertions(+), 29 deletions(-) delete mode 100644 autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py diff --git a/autoPyTorch/components/metrics/__init__.py b/autoPyTorch/components/metrics/__init__.py index cba08f437..fc023c87a 100644 --- a/autoPyTorch/components/metrics/__init__.py +++ b/autoPyTorch/components/metrics/__init__.py @@ -1,4 +1,3 @@ from autoPyTorch.components.metrics.balanced_accuracy import balanced_accuracy from autoPyTorch.components.metrics.pac_score import pac_metric -from autoPyTorch.components.metrics.standard_metrics import accuracy, auc_metric, mean_distance, multilabel_accuracy -from autoPyTorch.components.metrics.standard_metrics import top1, top3, top5 +from autoPyTorch.components.metrics.standard_metrics import accuracy, auc_metric, mean_distance, multilabel_accuracy, cross_entropy, top1, top3, top5 diff --git a/autoPyTorch/components/metrics/standard_metrics.py b/autoPyTorch/components/metrics/standard_metrics.py index d9ed2b7a9..b87541151 100644 --- 
a/autoPyTorch/components/metrics/standard_metrics.py +++ b/autoPyTorch/components/metrics/standard_metrics.py @@ -8,6 +8,12 @@ def accuracy(y_true, y_pred): def auc_metric(y_true, y_pred): return (2 * metrics.roc_auc_score(y_true, y_pred) - 1) +def cross_entropy(y_true, y_pred): + if y_true==1: + return -np.log(y_pred) + else: + return -np.log(1-y_pred) + def top1(y_pred, y_true): return topN(y_pred, y_true, 1) diff --git a/autoPyTorch/core/autonet_classes/autonet_feature_classification.py b/autoPyTorch/core/autonet_classes/autonet_feature_classification.py index 175317fbe..a6183e1f9 100644 --- a/autoPyTorch/core/autonet_classes/autonet_feature_classification.py +++ b/autoPyTorch/core/autonet_classes/autonet_feature_classification.py @@ -19,7 +19,7 @@ def _apply_default_pipeline_settings(pipeline): import torch.nn as nn from sklearn.model_selection import StratifiedKFold - from autoPyTorch.components.metrics import accuracy, auc_metric, pac_metric, balanced_accuracy + from autoPyTorch.components.metrics import accuracy, auc_metric, pac_metric, balanced_accuracy, cross_entropy from autoPyTorch.components.preprocessing.loss_weight_strategies import LossWeightStrategyWeighted AutoNetFeatureData._apply_default_pipeline_settings(pipeline) @@ -41,6 +41,8 @@ def _apply_default_pipeline_settings(pipeline): requires_target_class_labels=False) metric_selector.add_metric('balanced_accuracy', balanced_accuracy, loss_transform=True, requires_target_class_labels=True) + metric_selector.add_metric('cross_entropy', cross_entropy, loss_transform=True, + requires_target_class_labels=False) resample_selector = pipeline[ResamplingStrategySelector.get_name()] resample_selector.add_over_sampling_method('random', RandomOverSamplingWithReplacement) diff --git a/autoPyTorch/core/autonet_classes/autonet_image_classification.py b/autoPyTorch/core/autonet_classes/autonet_image_classification.py index b9be0ec1a..d9173aba2 100644 --- a/autoPyTorch/core/autonet_classes/autonet_image_classification.py +++ b/autoPyTorch/core/autonet_classes/autonet_image_classification.py @@ -12,7 +12,7 @@ def _apply_default_pipeline_settings(pipeline): from autoPyTorch.pipeline.nodes.image.cross_validation_indices import CrossValidationIndices from autoPyTorch.pipeline.nodes.image.loss_module_selector_indices import LossModuleSelectorIndices from autoPyTorch.pipeline.nodes.image.network_selector_datasetinfo import NetworkSelectorDatasetInfo - from autoPyTorch.components.metrics.standard_metrics import accuracy + from autoPyTorch.components.metrics import accuracy, auc_metric, pac_metric, balanced_accuracy, cross_entropy from autoPyTorch.components.preprocessing.loss_weight_strategies import LossWeightStrategyWeighted AutoNetImageData._apply_default_pipeline_settings(pipeline) @@ -25,7 +25,16 @@ def _apply_default_pipeline_settings(pipeline): loss_selector.add_loss_module('cross_entropy_weighted', nn.CrossEntropyLoss, LossWeightStrategyWeighted(), True) metric_selector = pipeline[MetricSelector.get_name()] - metric_selector.add_metric('accuracy', accuracy) + metric_selector.add_metric('accuracy', accuracy, loss_transform=True, + requires_target_class_labels=False) + metric_selector.add_metric('auc_metric', auc_metric, loss_transform=True, + requires_target_class_labels=False) + metric_selector.add_metric('pac_metric', pac_metric, loss_transform=True, + requires_target_class_labels=False) + metric_selector.add_metric('balanced_accuracy', balanced_accuracy, loss_transform=True, + requires_target_class_labels=True) + 
metric_selector.add_metric('cross_entropy', cross_entropy, loss_transform=True, + requires_target_class_labels=False) train_node = pipeline[SimpleTrainNode.get_name()] train_node.default_minimize_value = False diff --git a/autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py b/autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py deleted file mode 100644 index bc26e1629..000000000 --- a/autoPyTorch/core/hpbandster_extensions/bohb_multi_kde_ext.py +++ /dev/null @@ -1,20 +0,0 @@ -try: - from hpbandster.optimizers.bohb_multi_kde import BOHB_Multi_KDE -except: - print("Could not find BOHB_Multi_KDE, replacing with object") - BOHB_Multi_KDE = object -from autoPyTorch.core.hpbandster_extensions.run_with_time import run_with_time - -class BOHBMultiKDEExt(BOHB_Multi_KDE): - def run_until(self, runtime=1, n_iterations=float("inf"), min_n_workers=1, iteration_kwargs = {},): - """ - Parameters: - ----------- - runtime: int - time for this run in seconds - n_iterations: - the number of hyperband iterations to run - min_n_workers: int - minimum number of workers before starting the run - """ - return run_with_time(self, runtime, n_iterations, min_n_workers, iteration_kwargs) diff --git a/autoPyTorch/core/presets/image_classification/medium_cs.txt b/autoPyTorch/core/presets/image_classification/medium_cs.txt index f5d2fda04..f9fb6096f 100644 --- a/autoPyTorch/core/presets/image_classification/medium_cs.txt +++ b/autoPyTorch/core/presets/image_classification/medium_cs.txt @@ -1,4 +1,4 @@ lr_scheduler=[cosine_annealing, step] networks=[resnet, mobilenet] -batch_loss_computation_techniques=[mixup] +batch_loss_computation_techniques=[standard, mixup] optimizer=[adamw, sgd] diff --git a/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt b/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt index f5d2fda04..f9fb6096f 100644 --- a/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt +++ b/autoPyTorch/core/presets/image_classification_multiple_datasets/medium_cs.txt @@ -1,4 +1,4 @@ lr_scheduler=[cosine_annealing, step] networks=[resnet, mobilenet] -batch_loss_computation_techniques=[mixup] +batch_loss_computation_techniques=[standard, mixup] optimizer=[adamw, sgd] diff --git a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py index 7b7af6892..30af87980 100644 --- a/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py +++ b/autoPyTorch/pipeline/nodes/image/optimization_algorithm_no_timelimit.py @@ -22,7 +22,6 @@ from autoPyTorch.utils.config.config_option import ConfigOption, to_bool from autoPyTorch.core.hpbandster_extensions.bohb_ext import BOHBExt -from autoPyTorch.core.hpbandster_extensions.bohb_multi_kde_ext import BOHBMultiKDEExt from autoPyTorch.core.hpbandster_extensions.hyperband_ext import HyperBandExt from autoPyTorch.core.worker_no_timelimit import ModuleWorkerNoTimeLimit @@ -68,7 +67,6 @@ def __init__(self, optimization_pipeline_nodes): self.algorithms = dict() self.algorithms["bohb"] = BOHBExt self.algorithms["hyperband"] = HyperBandExt - self.algorithms["bohb_multi_kde"] = BOHBMultiKDEExt self.logger = logging.getLogger('autonet') From 7408d2498819b1781e64e93925c437dd30281f8e Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 16:29:46 +0200 Subject: [PATCH 12/13] Update README --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md 
b/README.md index 90738ea56..877270a64 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,8 @@ $ python setup.py install ## Examples +For a detailed tutorial, please refer to the jupyter notebook in https://github.com/automl/Auto-PyTorch/tree/master/examples/basics. + In a nutshell: ```py @@ -112,7 +114,7 @@ search_space_updates.append(node_name="NetworkSelector", autoPyTorch = AutoNetClassification(hyperparameter_search_space_updates=search_space_updates) ``` -Enable ensemble building: +Enable ensemble building (for featurized data): ```py from autoPyTorch import AutoNetEnsemble From 8cd4e0e1d4606c5b379fbb3bfe7e25ab8af7a5ff Mon Sep 17 00:00:00 2001 From: Lucas Zimmer Date: Wed, 9 Oct 2019 16:34:26 +0200 Subject: [PATCH 13/13] Update README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 877270a64..27a28271f 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Copyright (C) 2019 [AutoML Group Freiburg](http://www.automl.org/) This a very early pre-alpha version of our upcoming Auto-PyTorch. -So far, Auto-PyTorch only supports featurized data and image data. +So far, Auto-PyTorch supports featurized data (classification, regression) and image data (classification). ## Installation
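
Since PATCH 11 registers the new metric under the name 'cross_entropy' via `metric_selector.add_metric`, alongside 'accuracy', 'auc_metric', 'pac_metric' and 'balanced_accuracy', it can presumably be requested the same way the existing metrics are. The sketch below is illustrative only: the `optimize_metric="cross_entropy"` keyword usage and the `cross_entropy_vectorized` helper are assumptions for demonstration, not part of the committed patches, which add a helper that evaluates a single binary label at a time.

```py
import numpy as np

# Hypothetical metric selection; assumed to mirror the existing
# ``optimize_metric='accuracy'`` setting seen in the tutorial logs:
#
#     from autoPyTorch import AutoNetClassification
#     autonet = AutoNetClassification(config_preset="tiny_cs",
#                                     optimize_metric="cross_entropy",
#                                     result_logger_dir="logs/")

def cross_entropy_vectorized(y_true, y_pred, eps=1e-12):
    """Binary cross entropy averaged over samples (illustrative sketch only).

    The helper added in PATCH 11 handles a single binary label; this variant
    accepts arrays of labels and predicted probabilities and clips the
    probabilities to avoid log(0).
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.clip(np.asarray(y_pred, dtype=float), eps, 1.0 - eps)
    return float(np.mean(-(y_true * np.log(y_pred)
                           + (1.0 - y_true) * np.log(1.0 - y_pred))))

print(cross_entropy_vectorized([1, 0, 1], [0.9, 0.2, 0.7]))  # ~0.228
```

Because the metric is registered with `loss_transform=True`, a lower cross entropy corresponds to a better (higher) transformed score, consistent with how the other metrics are wired up in the patch.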